{
"cells": [
{
"cell_type": "code",
"execution_count": 74,
"metadata": {},
"outputs": [],
"source": [
"# Utilities to read/write CSV files\n",
"import csv\n",
"import unicodedata\n",
"# Ordered dicts\n",
"from collections import OrderedDict\n",
"# JSON for dictionary serialization (imported once; the original imported it twice)\n",
"import json\n",
"\n",
"\n",
"# OPTIONAL IMPORTS\n",
"\n",
"# For timestamping/simple speed tests\n",
"from datetime import datetime\n",
"# Random number generator\n",
"# NOTE(review): wildcard import pollutes the namespace and no random.* name is\n",
"# used anywhere in this notebook; kept only for backward compatibility.\n",
"from random import *\n",
"# System & command line utilities\n",
"import sys"
]
},
{
"cell_type": "code",
"execution_count": 75,
"metadata": {},
"outputs": [],
"source": [
"# Input/output directories -- NOTE(review): hardcoded absolute local paths;\n",
"# consider deriving these from a single configurable base directory.\n",
"import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/CSV/'\n",
"export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/RDF/'"
]
},
{
"cell_type": "code",
"execution_count": 76,
"metadata": {},
"outputs": [],
"source": [
"# Custom class to store URIs + related infos for the ontologies/repositories\n",
"\n",
"class RDFcoords:\n",
" def __init__(self, uri, prefix, code = None):\n",
" self.uri = uri\n",
" self.prefix = prefix\n",
" self.code = code\n",
"\n",
"# Repositories\n",
"museoCoords = RDFcoords('', 'mpp:')\n",
"autCoords = RDFcoords('', 'aut:')\n",
"cidocCoords = RDFcoords('', 'crm:')\n",
"aatCoords = RDFcoords('', 'aat:')\n",
"nsCoords = RDFcoords('', 'rdf:')\n",
"schemaCoords = RDFcoords('', 'rdfs:')\n",
"subClassOfCoords = RDFcoords('', 'so:')\n",
"datiniCoords = RDFcoords('', 'dt:')\n",
"personAuthCoords = RDFcoords('', 'pa:')\n",
"\n",
"# W3/CIDOC Predicates\n",
"hasTypeCoords = RDFcoords('', 'tp:')\n",
"hasTypePCoords = RDFcoords('', 'te:')\n",
"carriesCoords = RDFcoords('', 'ca:')\n",
"identifiedByCoords = RDFcoords('', 'ib:')\n",
"labelCoords = RDFcoords('', 'lb:')\n",
"wasBroughtCoords = RDFcoords('', 'wb:')\n",
"carriedByCoords = RDFcoords('', 'cb:')\n",
"hasAlternativeFormCoords = RDFcoords('', 'af:')\n",
"hasNoteCoords = RDFcoords('', 'no:')\n",
"hasTypeNCoords = RDFcoords('', 'tn:')\n",
"hasLanguageCoords = RDFcoords('', 'hl:')\n",
"documentsCoords = RDFcoords('', 'ds:')\n",
"hasComponentCoords = RDFcoords('', 'hc:')\n",
"\n",
"# CIDOC Objects\n",
"manMadeObjectCoords = RDFcoords('', 'mo:', 'E22')\n",
"informationObjectCoords = RDFcoords('', 'io:', 'E73')\n",
"titleCoords = RDFcoords('', 'ti:' ,'E35')\n",
"placeAppellationCoords = RDFcoords('', 'pa:', 'E44')\n",
"identifierCoords = RDFcoords('', 'id:', 'E42')\n",
"typeCoords = RDFcoords('', 'ty:', 'E55')\n",
"creationCoords = RDFcoords('', 'cr:', 'E65')\n",
"personCoords = RDFcoords('', 'ps:', 'E21')\n",
"stringCoords = RDFcoords('', 'sr:', 'E62')\n",
"linguisticObjCoords = RDFcoords('', 'lj:', 'E33')\n",
"languageCoords = RDFcoords('', 'ln:', 'E56')\n",
"appellationCoords = RDFcoords('', 'ap:', 'E41')\n",
"propositionalObjCoords = RDFcoords('', 'pj:', 'E89')\n"
]
},
{
"cell_type": "code",
"execution_count": 77,
"metadata": {},
"outputs": [],
"source": [
"# Basic functions for triples / shortened triples in TTL format\n",
"\n",
"def triple(subject, predicate, object1):\n",
" line = subject + ' ' + predicate + ' ' + object1\n",
" return line\n",
"\n",
"def doublet(predicate, object1):\n",
" line = ' ' + predicate + ' ' + object1\n",
" return line\n",
"\n",
"def singlet(object1):\n",
" line = ' ' + object1\n",
" return line\n",
"\n",
"# Line endings in TTL format\n",
"continueLine1 = ' ;\\n'\n",
"continueLine2 = ' ,\\n'\n",
"closeLine = ' .\\n'"
]
},
{
"cell_type": "code",
"execution_count": 78,
"metadata": {},
"outputs": [],
"source": [
"def writeTTLHeader(output):\n",
"    \"\"\"Write one '@prefix <label> <uri> .' declaration per namespace, then a blank line.\n",
"\n",
"    output -- writable text stream for the TTL file being generated.\n",
"    \"\"\"\n",
"    # Emission order kept identical to the original hand-written sequence so\n",
"    # regenerated files stay byte-comparable with earlier exports.\n",
"    header_coords = [\n",
"        datiniCoords, personAuthCoords, hasTypeCoords, hasTypePCoords,\n",
"        manMadeObjectCoords, carriesCoords, informationObjectCoords,\n",
"        identifiedByCoords, titleCoords, labelCoords, identifierCoords,\n",
"        wasBroughtCoords, typeCoords, carriedByCoords, personCoords,\n",
"        hasAlternativeFormCoords, hasNoteCoords, hasTypeNCoords,\n",
"        stringCoords, linguisticObjCoords, languageCoords, hasLanguageCoords,\n",
"        documentsCoords, appellationCoords, propositionalObjCoords,\n",
"        hasComponentCoords, museoCoords, cidocCoords, aatCoords,\n",
"        schemaCoords, nsCoords, autCoords, subClassOfCoords,\n",
"    ]\n",
"    for coords in header_coords:\n",
"        output.write('@prefix ' + coords.prefix + ' ' + coords.uri + closeLine)\n",
"    output.write('\\n')\n"
]
},
{
"cell_type": "code",
"execution_count": 89,
"metadata": {
"tags": []
},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/var/folders/_n/1ldwyw1s547dcpvn3485fr7r0000gn/T/ipykernel_1726/101792726.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0mlemma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lemma'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0miperlemma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'iperlemma'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 42\u001b[0;31m \u001b[0mmerci\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_merce\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlemma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0miperlemma\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 43\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'thing'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'TRUE'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/var/folders/_n/1ldwyw1s547dcpvn3485fr7r0000gn/T/ipykernel_1726/101792726.py\u001b[0m in \u001b[0;36mget_merce\u001b[0;34m(lemma, iperlemma)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mreader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcsv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDictReader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmerci_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mreader\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lemma'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mlemma\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'iperlemma'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0miperlemma\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'merce'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"filePrefix = 'data_'\n",
"fileType = 'lemmi_iperlemmi_thing_id'\n",
"max_entries = 10000000000000000\n",
"\n",
"\n",
"def get_merce(lemma, iperlemma):\n",
" merci_file = open(\n",
" '/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/CSV/OVI_lemmi_iperlemmi_OLD - Lemmi con Merce.csv', newline=\"\")\n",
" reader = csv.DictReader(merci_file)\n",
" for row in reader:\n",
" if (row['lemma'] == lemma and row['iperlemma'] == iperlemma):\n",
" return [row['merce']]\n",
"\n",
"\n",
"with open(import_dir + filePrefix + fileType + '.csv', newline=\"\") as csv_file, open(export_dir + filePrefix + fileType + '.ttl', 'w') as output:\n",
"    reader = csv.DictReader(csv_file)\n",
"    writeTTLHeader(output)\n",
"    ii = 0\n",
"    # Shared E55 'Lemma' type node (placeholder URI).\n",
"    e55placeHolder = ''\n",
"    line = triple(e55placeHolder, schemaCoords.prefix + 'label', '\\\"Lemma\\\"') + closeLine\n",
"    output.write(line)\n",
"    line = triple(e55placeHolder, nsCoords.prefix + 'type', '\\\"Lemma\\\"') + closeLine\n",
"    output.write(line)\n",
"\n",
"    for row in reader:\n",
"        # The index ii is used to process a limited number of entries for testing purposes.\n",
"        # BUG FIX: the counter was 'ii = ii+0', so the max_entries guard at the\n",
"        # bottom of the loop could never fire.\n",
"        ii = ii + 1\n",
"        e33POSplaceHolder = ''\n",
"        e28placeHolder = ''\n",
"        e33placeHolder = ''\n",
"        e28oplaceHolder = ''\n",
"        lemma = row['lemma']\n",
"        iperlemma = row['iperlemma']\n",
"        merci = get_merce(lemma, iperlemma)\n",
"\n",
"        if (row['thing'] == 'TRUE'):\n",
"            if (merci is not None):\n",
"                if (row['id'] is not None and row['id'] != '' and row['id'] != ' '):\n",
"                    e70placeHolder = ''\n",
"                    e73ASPOplaceHolder = \"\"\n",
"                    line = triple(e73ASPOplaceHolder, cidocCoords.prefix + 'P67_refers_to', e70placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                else:\n",
"                    e70placeHolder = ''\n",
"            elif (merci is None):\n",
"                e70placeHolder = ''\n",
"\n",
"            if (row['pos'] != 'antr.' and row['pos'] != 'n.g.'):\n",
"                if (row['id'] is not None and row['id'] != '' and row['id'] != ' '):\n",
"                    e73OVIplaceHolder = \"\"\n",
"                    line = triple(e73OVIplaceHolder, cidocCoords.prefix + 'P67_refers_to', e70placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e70placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E70_Thing') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e70placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e70placeHolder, cidocCoords.prefix + 'P128_carries', e33placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    # E33\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E33_Linguistic_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', e55placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    if (row['iperlemma'] != ''):\n",
"                        line = triple(e33placeHolder, subClassOfCoords.prefix, e28placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                    if (row['pos'] != ''):\n",
"                        # POS modelled as a node typed by an E55 placeholder.\n",
"                        e33POS55placeHolder = ''\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P2_has_type', e33POSplaceHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, nsCoords.prefix + 'type', e33POS55placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POS55placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['pos'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                    if (row['commento'] != ''):\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P3_has_note', '\\\"' + row['commento'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                elif (row['id'] is None or row['id'] == ''):\n",
"                    # NOTE(review): rows whose id is the single space ' ' match\n",
"                    # neither this branch nor the one above -- confirm intentional.\n",
"                    line = triple(e70placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E70_Thing') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e70placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e70placeHolder, cidocCoords.prefix + 'P128_carries', e33placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    # E33\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E33_Linguistic_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', e55placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    if (row['iperlemma'] != ''):\n",
"                        line = triple(e33placeHolder, subClassOfCoords.prefix, e28placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                    if (row['pos'] != ''):\n",
"                        # NOTE(review): unlike the branch above, the POS node is typed\n",
"                        # with the literal 'Part of Speech' instead of an E55 node --\n",
"                        # confirm the asymmetry is intended.\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P2_has_type', e33POSplaceHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, schemaCoords.prefix + 'label', '\\\"' + row['pos'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, cidocCoords.prefix + 'P2_has_type', '\\\"Part of Speech\\\"') + closeLine\n",
"                        output.write(line)\n",
"                    if (row['commento'] != ''):\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P3_has_note', '\\\"' + row['commento'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"        if (row['thing'] == 'FALSE'):\n",
"            if (row['pos'] != 'antr.' and row['pos'] != 'n.g.'):\n",
"                if (row['id'] is not None and row['id'] != ''):\n",
"                    e73OVIplaceHolder = \"\"\n",
"                    line = triple(e73OVIplaceHolder, cidocCoords.prefix + 'P67_refers_to', e28oplaceHolder) + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e28oplaceHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E28_Conceptual_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e28oplaceHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e28oplaceHolder, cidocCoords.prefix + 'P128_carries', e33placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    # E33\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E33_Linguistic_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', e55placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    if (row['iperlemma'] != ''):\n",
"                        line = triple(e33placeHolder, subClassOfCoords.prefix, e28placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                    if (row['pos'] != ''):\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P2_has_type', e33POSplaceHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, schemaCoords.prefix + 'label', '\\\"' + row['pos'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, cidocCoords.prefix + 'P2_has_type', '\\\"Part of Speech\\\"') + closeLine\n",
"                        output.write(line)\n",
"                    if (row['commento'] != ''):\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P3_has_note', '\\\"' + row['commento'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                elif (row['id'] is None or row['id'] == ''):\n",
"                    line = triple(e28oplaceHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E28_Conceptual_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e28oplaceHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e28oplaceHolder, cidocCoords.prefix + 'P128_carries', e33placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    # E33\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', cidocCoords.prefix + 'E33_Linguistic_Object') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, schemaCoords.prefix + 'label', '\\\"' + row['lemma'] + '\\\"') + closeLine\n",
"                    output.write(line)\n",
"                    line = triple(e33placeHolder, nsCoords.prefix + 'type', e55placeHolder) + closeLine\n",
"                    output.write(line)\n",
"                    if (row['iperlemma'] != ''):\n",
"                        line = triple(e33placeHolder, subClassOfCoords.prefix, e28placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                    if (row['pos'] != ''):\n",
"                        # NOTE(review): this branch types the POS node via an E55\n",
"                        # placeholder, while the id-present branch above uses a\n",
"                        # literal 'Part of Speech' -- confirm the asymmetry.\n",
"                        e33POS55placeHolder = ''\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P2_has_type', e33POSplaceHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, schemaCoords.prefix + 'label', '\\\"' + row['pos'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POSplaceHolder, nsCoords.prefix + 'type', e33POS55placeHolder) + closeLine\n",
"                        output.write(line)\n",
"                        line = triple(e33POS55placeHolder, schemaCoords.prefix + 'label', '\\\"Part of Speech\\\"') + closeLine\n",
"                        output.write(line)\n",
"                    if (row['commento'] != ''):\n",
"                        line = triple(e33placeHolder, cidocCoords.prefix + 'P3_has_note', '\\\"' + row['commento'] + '\\\"') + closeLine\n",
"                        output.write(line)\n",
"\n",
"        output.write('\\n')\n",
"        # Limit number of entries processed (if desired)\n",
"        if (ii > max_entries):\n",
"            break\n"
]
},
{
"cell_type": "code",
"execution_count": 88,
"metadata": {},
"outputs": [],
"source": [
"filePrefix = 'data_'\n",
"fileType = 'lemmi_iperlemmi_thing_id'\n",
"max_entries = 10000000000000000\n",
"\n",
"def get_merce(lemma, iperlemma):\n",
" merci_file = open('/Users/federicaspinelli/TEAMOVI/Parser/DATA/OVI/CSV/OVI_lemmi_iperlemmi_OLD - Lemmi con Merce.csv', newline=\"\")\n",
" reader = csv.DictReader(merci_file)\n",
" for row in reader:\n",
" if (row['lemma'] == lemma and row['iperlemma'] == iperlemma):\n",
" return [row['merce']]\n",
"\n",
"with open(import_dir + filePrefix + fileType + '.csv', newline=\"\") as csv_file, open(export_dir + filePrefix + fileType + '_merci.ttl', 'w') as output:\n",
"    reader = csv.DictReader(csv_file)\n",
"    writeTTLHeader(output)\n",
"    first = True\n",
"    ii = 0\n",
"    # Shared E55 'Merce' type node (placeholder URI).\n",
"    e55placeHolder = ''\n",
"    line = triple(e55placeHolder, hasTypeCoords.prefix, typeCoords.prefix) + closeLine\n",
"    output.write(line)\n",
"    line = triple(e55placeHolder, labelCoords.prefix, '\\\"Merce\\\"') + closeLine\n",
"    output.write(line)\n",
"    for row in reader:\n",
"        # Counter of processed rows, used to limit entries for testing purposes.\n",
"        # BUG FIX: it was 'ii = ii+0', so the counter never advanced.\n",
"        ii = ii + 1\n",
"        # Skip the first line as it carries info we don't want to triplify.\n",
"        # NOTE(review): csv.DictReader has already consumed the header row, so\n",
"        # this actually discards the first DATA row -- confirm that is intended.\n",
"        if (first):\n",
"            first = False\n",
"            continue\n",
"        lemma = row['lemma']\n",
"        iperlemma = row['iperlemma']\n",
"        merci = get_merce(lemma, iperlemma)\n",
"        if (row['thing'] == 'TRUE'):\n",
"            if (merci is not None):\n",
"                e70placeHolder = ''\n",
"                line = triple(e70placeHolder, hasTypePCoords.prefix, e55placeHolder) + closeLine\n",
"                output.write(line)\n"
]
}
],
"metadata": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
},
"kernelspec": {
"display_name": "Python 3.9.0 64-bit",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.0"
},
"metadata": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}