# --- Cell 1: imports ---------------------------------------------------------
# Utilities to read/write csv files
import csv
import unicodedata
# Ordered Dicts
from collections import OrderedDict
import json

# OPTIONAL IMPORTS

# For timestamping / simple speed tests
from datetime import datetime
# Random number generator.
# FIX: was `from random import *` — a wildcard import that polluted the
# namespace; nothing in the notebook uses it unqualified, so import the
# module instead.  The duplicate `import json` that followed was removed.
import random
# System & command line utilities
import sys

# --- Cell 2: directories -----------------------------------------------------
# NOTE(review): absolute local paths — consider making these configurable.
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/CSV/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/RDF/'

# --- Cell 3: RDF coordinates -------------------------------------------------
class RDFcoords:
    """Bundle a URI with its TTL prefix (and an optional CIDOC class code).

    Attributes:
        uri:    namespace URI (empty string in this copy of the notebook —
                values appear to have been redacted).
        prefix: TTL prefix label, e.g. 'crm:'.
        code:   optional CIDOC CRM class code, e.g. 'E55'.
    """

    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code

# Repositories (URIs empty/redacted in this copy)
museoCoords = RDFcoords('', 'mpp:')
autCoords = RDFcoords('', 'aut:')
cidocCoords = RDFcoords('', 'crm:')
aatCoords = RDFcoords('', 'aat:')
nsCoords = RDFcoords('', 'rdf:')
schemaCoords = RDFcoords('', 'rdfs:')
subClassOfCoords = RDFcoords('', 'so:')
datiniCoords = RDFcoords('', 'dt:')
personAuthCoords = RDFcoords('', 'pa:')

# W3/CIDOC Predicates
hasTypeCoords = RDFcoords('', 'tp:')
hasTypePCoords = RDFcoords('', 'te:')
carriesCoords = RDFcoords('', 'ca:')
identifiedByCoords = RDFcoords('', 'ib:')
labelCoords = RDFcoords('', 'lb:')
wasBroughtCoords = RDFcoords('', 'wb:')
carriedByCoords = RDFcoords('', 'cb:')
hasAlternativeFormCoords = RDFcoords('', 'af:')
hasNoteCoords = RDFcoords('', 'no:')
# CIDOC predicates (continued)
hasTypeNCoords = RDFcoords('', 'tn:')
hasLanguageCoords = RDFcoords('', 'hl:')
documentsCoords = RDFcoords('', 'ds:')
hasComponentCoords = RDFcoords('', 'hc:')

# CIDOC Objects (prefix + CIDOC CRM class code)
manMadeObjectCoords = RDFcoords('', 'mo:', 'E22')
informationObjectCoords = RDFcoords('', 'io:', 'E73')
titleCoords = RDFcoords('', 'ti:', 'E35')
placeAppellationCoords = RDFcoords('', 'pa:', 'E44')
identifierCoords = RDFcoords('', 'id:', 'E42')
typeCoords = RDFcoords('', 'ty:', 'E55')
creationCoords = RDFcoords('', 'cr:', 'E65')
personCoords = RDFcoords('', 'ps:', 'E21')
stringCoords = RDFcoords('', 'sr:', 'E62')
linguisticObjCoords = RDFcoords('', 'lj:', 'E33')
languageCoords = RDFcoords('', 'ln:', 'E56')
appellationCoords = RDFcoords('', 'ap:', 'E41')
propositionalObjCoords = RDFcoords('', 'pj:', 'E89')

# --- TTL building blocks -----------------------------------------------------

def triple(subject, predicate, object1):
    """Full TTL statement: 'subject predicate object'."""
    return subject + ' ' + predicate + ' ' + object1

def doublet(predicate, object1):
    """Shortened TTL statement with the subject elided."""
    return ' ' + predicate + ' ' + object1

def singlet(object1):
    """Shortened TTL statement with subject and predicate elided."""
    return ' ' + object1

# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'

def writeTTLHeader(output):
    """Write every @prefix declaration used by the export to `output`.

    `output` is an open, writable text file.  The declarations are emitted
    in the exact order of the original hand-written sequence of write()
    calls, driven by a tuple instead of 33 copy-pasted statements.
    """
    header_coords = (
        datiniCoords, personAuthCoords, hasTypeCoords, hasTypePCoords,
        manMadeObjectCoords, carriesCoords, informationObjectCoords,
        identifiedByCoords, titleCoords, labelCoords, identifierCoords,
        wasBroughtCoords, typeCoords, carriedByCoords, personCoords,
        hasAlternativeFormCoords, hasNoteCoords, hasTypeNCoords,
        stringCoords, linguisticObjCoords, languageCoords,
        hasLanguageCoords, documentsCoords, appellationCoords,
        propositionalObjCoords, hasComponentCoords, museoCoords,
        cidocCoords, aatCoords, schemaCoords, nsCoords, autCoords,
        subClassOfCoords,
    )
    for coords in header_coords:
        output.write('@prefix ' + coords.prefix + ' ' + coords.uri + closeLine)
    output.write('\n')
# --- Cell 15: main conversion — merge OVI-TLIO CSV -> TTL --------------------
filePrefix = 'merge'
fileType = 'OVI-TLIO'
max_entries = 10000000000000000  # effectively unlimited; lower for quick tests

def get_merce(lemma, iperlemma):
    """Return [merce] for the first merci-CSV row matching (lemma, iperlemma).

    Returns None when no row matches.

    FIX: the CSV is now opened with a context manager (the handle used to
    leak — the original never closed it).
    NOTE(review): this re-reads the whole file on every call (O(rows) per
    lookup); consider loading it once into a dict if this becomes slow.
    """
    # import_dir is '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/CSV/',
    # so this is the same path as the original hard-coded literal.
    merci_path = import_dir + 'OVI_lemmi_iperlemmi_OLD - Lemmi con Merce.csv'
    with open(merci_path, newline="") as merci_file:
        for row in csv.DictReader(merci_file):
            if row['lemma'] == lemma and row['iperlemma'] == iperlemma:
                return [row['merce']]
    return None

def _write_e33_triples(output, row, e33, e55, e28, e33pos, e33pos55):
    """Emit the E33 Linguistic Object triples for one lemma row.

    This body was copy-pasted four times in the original cell; it is now a
    single helper so the four branches cannot drift apart.
    """
    output.write(triple(e33, nsCoords.prefix + 'type',
                        cidocCoords.prefix + 'E33_Linguistic_Object') + closeLine)
    output.write(triple(e33, schemaCoords.prefix + 'label',
                        '"' + row['lemma'] + '"') + closeLine)
    output.write(triple(e33, nsCoords.prefix + 'type', e55) + closeLine)
    if row['iperlemma'] != '':
        output.write(triple(e33, subClassOfCoords.prefix, e28) + closeLine)
    if row['pos'] != '':
        output.write(triple(e33, cidocCoords.prefix + 'P2_has_type', e33pos) + closeLine)
        output.write(triple(e33pos, schemaCoords.prefix + 'label',
                            '"' + row['pos'] + '"') + closeLine)
        output.write(triple(e33pos, nsCoords.prefix + 'type', e33pos55) + closeLine)
    if row['commento'] != '':
        output.write(triple(e33, cidocCoords.prefix + 'P3_has_note',
                            '"' + row['commento'] + '"') + closeLine)

def _write_carrier_triples(output, row, carrier, carrier_class, e33):
    """Emit type/label/P128_carries triples for the object (E70 Thing or
    E28 Conceptual Object) that carries the lemma's E33."""
    output.write(triple(carrier, nsCoords.prefix + 'type',
                        cidocCoords.prefix + carrier_class) + closeLine)
    output.write(triple(carrier, schemaCoords.prefix + 'label',
                        '"' + row['lemma'] + '"') + closeLine)
    output.write(triple(carrier, cidocCoords.prefix + 'P128_carries', e33) + closeLine)

with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, \
     open(export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    # FIX: `ii` used to be incremented with `ii = ii + 0`, so the
    # `ii > max_entries` guard at the bottom of the loop could never fire.
    # (The unused `first = True` flag from the original was dropped.)
    ii = 0

    # E55 type placeholder for "Lemma"
    e55placeHolder = ''
    output.write(triple(e55placeHolder, schemaCoords.prefix + 'label', '"Lemma"') + closeLine)
    output.write(triple(e55placeHolder, nsCoords.prefix + 'type', '"Lemma"') + closeLine)
    # E55 type placeholder for "Part of Speech"
    e33POS55placeHolder = ''
    output.write(triple(e33POS55placeHolder, schemaCoords.prefix + 'label',
                        '"Part of Speech"') + closeLine)
    output.write(triple(e33POS55placeHolder, nsCoords.prefix + 'type',
                        '"Part of Speech"') + closeLine)

    for row in reader:
        # The index ii limits the number of entries processed, for testing.
        ii += 1
        # NOTE(review): every placeholder below is '' in this copy of the
        # notebook (the URI templates appear redacted); the branch structure
        # is preserved so the link_tlio-dependent URIs can be restored.
        e33POSplaceHolder = ''
        e28placeHolder = ''
        if row['link_tlio'] != '' and row['link_tlio'] is not None:
            e33placeHolder = ''
        elif row['link_tlio'] == '':
            e33placeHolder = ''
        else:
            e33placeHolder = ''
        if row['link_tlio'] != '' and row['link_tlio'] is not None:
            e28oplaceHolder = ''
        elif row['link_tlio'] == '':
            e28oplaceHolder = ''
        else:
            e28oplaceHolder = ''
        if (row['link_tlio'] != '' and row['link_tlio'] is not None
                and row['link_tlio'] != ' ' and row['link_tlio'] != 'None'):
            e70placeHolder = ''
        elif row['link_tlio'] == ' ':
            e70placeHolder = ''
        elif row['link_tlio'] == '':
            e70placeHolder = ''
        else:
            e70placeHolder = ''

        merci = get_merce(row['lemma'], row['iperlemma'])

        if row['thing'] == 'TRUE':
            if merci is not None:
                # Lemma is a known merchandise: add an E73 (ASPO) reference.
                if row['id'] is not None and row['id'] != '' and row['id'] != ' ':
                    e73ASPOplaceHolder = ""
                    output.write(triple(e73ASPOplaceHolder,
                                        cidocCoords.prefix + 'P67_refers_to',
                                        e70placeHolder) + closeLine)
            # Skip anthroponyms and geographic names.
            if row['pos'] != 'antr.' and row['pos'] != 'n.g.':
                if row['id'] is not None and row['id'] != '' and row['id'] != ' ':
                    e73OVIplaceHolder = ""
                    output.write(triple(e73OVIplaceHolder,
                                        cidocCoords.prefix + 'P67_refers_to',
                                        e70placeHolder) + closeLine)
                    _write_carrier_triples(output, row, e70placeHolder,
                                           'E70_Thing', e33placeHolder)
                    _write_e33_triples(output, row, e33placeHolder, e55placeHolder,
                                       e28placeHolder, e33POSplaceHolder,
                                       e33POS55placeHolder)
                elif row['id'] is None or row['id'] == '':
                    _write_carrier_triples(output, row, e70placeHolder,
                                           'E70_Thing', e33placeHolder)
                    _write_e33_triples(output, row, e33placeHolder, e55placeHolder,
                                       e28placeHolder, e33POSplaceHolder,
                                       e33POS55placeHolder)
        if row['thing'] == 'FALSE':
            if row['pos'] != 'antr.' and row['pos'] != 'n.g.':
                if row['id'] is not None and row['id'] != '':
                    e73OVIplaceHolder = ""
                    output.write(triple(e73OVIplaceHolder,
                                        cidocCoords.prefix + 'P67_refers_to',
                                        e28oplaceHolder) + closeLine)
                    _write_carrier_triples(output, row, e28oplaceHolder,
                                           'E28_Conceptual_Object', e33placeHolder)
                    _write_e33_triples(output, row, e33placeHolder, e55placeHolder,
                                       e28placeHolder, e33POSplaceHolder,
                                       e33POS55placeHolder)
                elif row['id'] is None or row['id'] == '':
                    _write_carrier_triples(output, row, e28oplaceHolder,
                                           'E28_Conceptual_Object', e33placeHolder)
                    _write_e33_triples(output, row, e33placeHolder, e55placeHolder,
                                       e28placeHolder, e33POSplaceHolder,
                                       e33POS55placeHolder)

        output.write('\n')
        # Limit number of entries processed (if desired)
        if ii > max_entries:
            break
# --- Cell 16: merci conversion — merge OVI-TLIO CSV -> *_merci.ttl -----------
# (The SyntaxWarning previously recorded in this cell's output was caused by
# the `!= '' is not None` chained comparison fixed below.)
filePrefix = 'merge'
fileType = 'OVI-TLIO'
max_entries = 10000000000000000  # effectively unlimited

# NOTE(review): duplicate definition — get_merce is also defined in the
# previous cell and is shadowed here; kept so this cell stays self-contained.
def get_merce(lemma, iperlemma):
    """Return [merce] for the first merci-CSV row matching (lemma, iperlemma).

    Returns None when no row matches.
    FIX: the CSV is now opened with a context manager (the handle used to leak).
    """
    merci_path = import_dir + 'OVI_lemmi_iperlemmi_OLD - Lemmi con Merce.csv'
    with open(merci_path, newline="") as merci_file:
        for row in csv.DictReader(merci_file):
            if row['lemma'] == lemma and row['iperlemma'] == iperlemma:
                return [row['merce']]
    return None

with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, \
     open(export_dir + filePrefix + fileType + '_merci.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True
    ii = 0
    # E55 "Merce" type placeholder ('' — URI redacted in this copy)
    e55placeHolder = ''
    output.write(triple(e55placeHolder, hasTypeCoords.prefix, typeCoords.prefix) + closeLine)
    output.write(triple(e55placeHolder, labelCoords.prefix, '"Merce"') + closeLine)
    for row in reader:
        # The index ii is used to process a limited number of entries for
        # testing purposes.  FIX: was `ii = ii + 0`, which never incremented.
        ii += 1
        # Skip the first line as it carries info we don't want to triplify
        if first:
            first = False
            continue
        lemma = row['lemma']
        iperlemma = row['iperlemma']
        merci = get_merce(lemma, iperlemma)
        # FIX: was `row['link_tlio'] != '' and row['link_tlio'] != '' is not None`,
        # whose second half chained to just `row['link_tlio'] != ''` and raised
        # the SyntaxWarning seen in the recorded output.  Intent (per the
        # previous cell) is a plain None check.
        if row['link_tlio'] != '' and row['link_tlio'] is not None:
            e70placeHolder = ''
        elif row['link_tlio'] == '':
            e70placeHolder = ''
        else:
            e70placeHolder = ''

        if row['thing'] == 'TRUE':
            if merci is not None:
                output.write(triple(e70placeHolder, hasTypePCoords.prefix,
                                    e55placeHolder) + closeLine)