import logging
import shutil

from io import BytesIO
from contextlib import ContextDecorator
from os import makedirs, path
from urllib.parse import urldefrag

import lmdb
import requests
import yaml

from rdflib import Graph, URIRef

from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.env import env
from lakesuperior.globals import AppGlobals, ROOT_UID
from lakesuperior.config_parser import parse_config
from lakesuperior.store.ldp_rs.lmdb_store import TxnManager


logger = logging.getLogger(__name__)


class StoreWrapper(ContextDecorator):
    '''
    Open and close a store.
    '''
    def __init__(self, store):
        self.store = store

    def __enter__(self):
        self.store.open(
                env.config['application']['store']['ldp_rs'])

    def __exit__(self, *exc):
        self.store.close()
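
# Usage sketch (illustrative, not part of the original module): because
# StoreWrapper subclasses contextlib.ContextDecorator, it works both as a
# context manager and as a decorator. ``some_store`` is a placeholder for any
# object exposing the ``open()``/``close()`` interface assumed above.
#
#     with StoreWrapper(some_store):
#         ...  # store is open inside this block
#
#     @StoreWrapper(some_store)
#     def process_resources():
#         ...  # store is open for the duration of each call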


class Migrator:
    """
    Class to handle a database migration.

    This class holds the state of progress and shared variables as it crawls
    through linked resources in an LDP server.

    Since a repository migration can be a very long operation, and it is
    impossible to know in advance the number of resources to gather by LDP
    interaction alone, a progress ticker outputs the number of processed
    resources at regular intervals.
    """

    """
    LMDB database parameters.

    See :meth:`lmdb.Environment.__init__`
    """
    db_params = {
        'map_size': 1024 ** 4,
        'metasync': False,
        'readahead': False,
        'meminit': False,
    }
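
    # Illustrative note (assumption about the store internals): these are
    # keyword arguments for :meth:`lmdb.Environment.__init__`; a store might
    # open its environment roughly as ``lmdb.open(dbpath, **db_params)``,
    # where ``dbpath`` is a placeholder for the database location.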
  48. """List of predicates to ignore when looking for links."""
  49. ignored_preds = (
  50. nsc['fcrepo'].hasParent,
  51. nsc['fcrepo'].hasTransactionProvider,
  52. nsc['fcrepo'].hasFixityService,
  53. )

    def __init__(
            self, src, dest, zero_binaries=False, compact_uris=False,
            skip_errors=False):
        """
        Set up base paths and clean up existing directories.

        :param src: (URIRef) Webroot of the source repository. This must
        correspond to the LDP root node (for Fedora it can be e.g.
        ``http://localhost:8080/fcrepo/rest/``) and is used to determine
        whether retrieved URIs are managed by this repository.
        :param dest: (str) Destination repository path. If the location
        exists, it must be a writable directory. It will be deleted and
        recreated. If it does not exist, it will be created along with its
        parents if missing.
        :param zero_binaries: (bool) Whether to write zero-byte files instead
        of downloading binary contents.
        :param compact_uris: (bool) NOT IMPLEMENTED. Whether the process
        should attempt to compact URIs generated with broken-up path segments.
        If the UID matches a pattern such as ``/12/34/56/123456...`` it is
        converted to ``/123456...``. This would remove a lot of cruft caused
        by the pairtree segments. Note that this will change the publicly
        exposed URIs. If durability is a concern, a rewrite directive can be
        added to the HTTP server that proxies the WSGI endpoint.
        :param skip_errors: (bool) Whether to log retrieval errors and
        continue, rather than raising an exception.
        """
        # Set up repo folder structure and copy default configuration to
        # destination file.
        cur_dir = path.dirname(path.dirname(path.abspath(__file__)))
        self.dbpath = '{}/data/ldprs_store'.format(dest)
        self.fpath = '{}/data/ldpnr_store'.format(dest)
        self.config_dir = '{}/etc'.format(dest)

        shutil.rmtree(dest, ignore_errors=True)
        shutil.copytree(
                '{}/etc.defaults'.format(cur_dir), self.config_dir)

        # Modify and overwrite destination configuration.
        orig_config, _ = parse_config(self.config_dir)
        orig_config['application']['store']['ldp_rs']['location'] = self.dbpath
        orig_config['application']['store']['ldp_nr']['path'] = self.fpath

        with open('{}/application.yml'.format(self.config_dir), 'w') \
                as config_file:
            config_file.write(yaml.dump(orig_config['application']))

        env.config, _ = parse_config(self.config_dir)
        env.app_globals = AppGlobals(env.config)

        self.rdfly = env.app_globals.rdfly
        self.nonrdfly = env.app_globals.nonrdfly

        with TxnManager(env.app_globals.rdf_store, write=True) as txn:
            self.rdfly.bootstrap()
            self.rdfly.store.close()
        env.app_globals.nonrdfly.bootstrap()

        self.src = src.rstrip('/')
        self.zero_binaries = zero_binaries
        self.skip_errors = skip_errors
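
        # Resulting layout under ``dest`` (illustrative, derived from the
        # paths set above):
        #
        #     <dest>/etc/                 copied and rewritten configuration
        #     <dest>/data/ldprs_store/    LMDB RDF (LDP-RS) store
        #     <dest>/data/ldpnr_store/    file-based binary (LDP-NR) store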

    def migrate(self, start_pts=None, list_file=None):
        """
        Migrate the database.

        This method creates a fully functional and configured LAKEsuperior
        data set contained in a folder from an LDP repository.

        :param tuple|list start_pts: List of starting points to retrieve
        resources from. It would typically be the repository root in case of a
        full dump, or one or more resources in the repository for a partial
        one.
        :param str list_file: Path to a local file containing a list of URIs,
        one per line.
        """
        from lakesuperior.api import resource as rsrc_api
        self._ct = 0
        with StoreWrapper(self.rdfly.store):
            if start_pts:
                for start in start_pts:
                    if not start.startswith('/'):
                        raise ValueError(
                            'Starting point {} does not begin with a slash.'
                            .format(start))

                    if start != ROOT_UID:
                        # Create the full hierarchy with link to the parents.
                        rsrc_api.create_or_replace(start)
                    # Then populate the new resource and crawl for more
                    # relationships.
                    self._crawl(start)
            elif list_file:
                with open(list_file, 'r') as fp:
                    for uri in fp:
                        uid = uri.strip().replace(self.src, '')
                        if uid != ROOT_UID:
                            rsrc_api.create_or_replace(uid)
                        self._crawl(uid)
        logger.info('Dumped {} resources.'.format(self._ct))

        return self._ct
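
    # Illustrative ``list_file`` content (assumption: one full source URI per
    # line, sharing the ``self.src`` webroot, as implied by the
    # ``uri.strip().replace(self.src, '')`` call above):
    #
    #     http://localhost:8080/fcrepo/rest/res1
    #     http://localhost:8080/fcrepo/rest/path/to/res2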

    def _crawl(self, uid):
        """
        Get the contents of a resource and its relationships recursively.

        This method recurses into itself each time a reference to a resource
        managed by the repository is encountered.

        :param str uid: The path relative to the source server webroot
        pointing to the resource to crawl, effectively the resource UID.
        """
        ibase = str(nsc['fcres'])
        # Public URI of source repo.
        uri = self.src + uid
        # Internal URI of destination.
        iuri = ibase + uid

        rsp = requests.head(uri)
        if not self.skip_errors:
            rsp.raise_for_status()
        elif rsp.status_code > 399:
            print('Error retrieving resource {} headers: {} {}'.format(
                uri, rsp.status_code, rsp.text))

        # Determine LDP type.
        ldp_type = 'ldp_nr'
        try:
            for link in requests.utils.parse_header_links(
                    rsp.headers.get('link')):
                if (
                        link.get('rel') == 'type'
                        and (
                            link.get('url') == str(nsc['ldp'].RDFSource)
                            or link.get('url') == str(nsc['ldp'].Container))):
                    # Resource is an LDP-RS.
                    ldp_type = 'ldp_rs'
                    break
        except TypeError:
            # No parseable Link header: fall back to treating the resource as
            # an LDP-RS.
            ldp_type = 'ldp_rs'
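        # Illustrative example (assumption) of a Link header the loop above
        # would recognize as marking an LDP-RS:
        #
        #     Link: <http://www.w3.org/ns/ldp#RDFSource>; rel="type"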

        # Get the whole RDF document now because we have to know all outbound
        # links.
        get_uri = (
                uri if ldp_type == 'ldp_rs' else '{}/fcr:metadata'.format(uri))
        get_rsp = requests.get(get_uri)
        if not self.skip_errors:
            get_rsp.raise_for_status()
        elif get_rsp.status_code > 399:
            print('Error retrieving resource {} body: {} {}'.format(
                uri, get_rsp.status_code, get_rsp.text))

        data = get_rsp.content.replace(
                self.src.encode('utf-8'), ibase.encode('utf-8'))
        gr = Graph(identifier=iuri).parse(data=data, format='turtle')

        # Store raw graph data. No checks.
        with TxnManager(self.rdfly.store, True):
            self.rdfly.modify_rsrc(uid, add_trp=set(gr))

        # Grab binary and set new resource parameters.
        if ldp_type == 'ldp_nr':
            provided_imr = gr.resource(URIRef(iuri))
            if self.zero_binaries:
                data = b''
            else:
                bin_rsp = requests.get(uri)
                if not self.skip_errors:
                    bin_rsp.raise_for_status()
                elif bin_rsp.status_code > 399:
                    print('Error retrieving resource {} body: {} {}'.format(
                        uri, bin_rsp.status_code, bin_rsp.text))
                data = bin_rsp.content

            # Use the last segment of the message digest URI (typically
            # ``urn:sha1:<digest>``) as the file name.
            uuid = str(gr.value(
                URIRef(iuri), nsc['premis'].hasMessageDigest)).split(':')[-1]
            fpath = self.nonrdfly.local_path(
                    self.nonrdfly.config['path'], uuid)
            makedirs(path.dirname(fpath), exist_ok=True)
            with open(fpath, 'wb') as fh:
                fh.write(data)

        self._ct += 1
        if self._ct % 10 == 0:
            print('{} resources processed so far.'.format(self._ct))

        # Now, crawl through outbound links.
        # LDP-NR fcr:metadata must be checked too.
        for pred, obj in gr.predicate_objects():
            obj_uid = obj.replace(ibase, '')
            with TxnManager(self.rdfly.store, True):
                conditions = bool(
                    isinstance(obj, URIRef)
                    and obj.startswith(iuri)
                    # Avoid ∞ loop with fragment URIs.
                    and str(urldefrag(obj).url) != str(iuri)
                    # Avoid ∞ loop with circular references.
                    and not self.rdfly.ask_rsrc_exists(obj_uid)
                    and pred not in self.ignored_preds
                )
            if conditions:
                print('Object {} will be crawled.'.format(obj_uid))
                self._crawl(urldefrag(obj_uid).url)
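

# Minimal end-to-end usage sketch (illustrative, not part of the original
# module; the source URL and destination path are hypothetical):
#
#     migrator = Migrator(
#         'http://localhost:8080/fcrepo/rest', '/var/tmp/lsup_migration',
#         zero_binaries=True)
#     migrator.migrate(start_pts=('/',))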