# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Json for the dictionary
import json

# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys

import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/CSV/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/MPP/RDF/'


# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code


# Repositories
museoCoords = RDFcoords('', 'mpp:')  # project namespace, URI to be filled in
autCoords = RDFcoords('', 'aut:')  # project namespace, URI to be filled in
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
aatCoords = RDFcoords('<http://vocab.getty.edu/aat/>', 'aat:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
xsdCoords = RDFcoords('<http://www.w3.org/2001/XMLSchema#>', 'xsd:')
iconCoords = RDFcoords('', 'ico:')  # project namespace, URI to be filled in
foafCoords = RDFcoords('<http://xmlns.com/foaf/0.1/>', 'foaf:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')


# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'


def writeTTLHeader(output):
    output.write('@prefix ' + museoCoords.prefix + ' ' + museoCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + aatCoords.prefix + ' ' + aatCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + autCoords.prefix + ' ' + autCoords.uri + closeLine)
    output.write('@prefix ' + xsdCoords.prefix + ' ' + xsdCoords.uri + closeLine)
    output.write('@prefix ' + iconCoords.prefix + ' ' + iconCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('@prefix ' + foafCoords.prefix + ' ' + foafCoords.uri + closeLine)
    output.write('\n')


filePrefix = 'OA'
fileType = '_catalogo'
max_entries = 1000000000  # effectively no limit


def get_aut_url(code):
    # Return [URL, role] for the authority record whose AUTH field matches code;
    # returns None if no record matches
    with open(import_dir + 'AR20AUT_' + fileType + '.csv', newline="") as aut_file:
        reader = csv.DictReader(aut_file)
        for row in reader:
            if int(row['AUTH']) == int(code):
                return [row['URL'], row['AUTQ']]


def get_role(role):
    # Map a role label to its AAT identifier
    with open('/Users/federicaspinelli/Google Drive/OVI-CNR/CSV/MPP/AAT_RUOLI.csv',
              newline="") as role_file:
        reader = csv.DictReader(role_file)
        for row in reader:
            if row['Label'] == role:
                return row['AAT']


def get_elem(mtc):
    # Map an MTC (material/technique) code to its AAT identifier and type
    with open('/Users/federicaspinelli/Google Drive/OVI-CNR/CSV/MPP/AAT_MTC.csv',
              newline="") as mtc_file:
        reader = csv.DictReader(mtc_file)
        for row in reader:
            if row['MTC'] == mtc:
                return [row['AAT'], row['Type']]


with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1

        # placeHolders
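        # The suffixes in these URIs appear to encode CIDOC CRM classes
        # (E12 Production, E13 Attribute Assignment, E21 Person, E31 Document,
        # E36 Visual Item, E42 Identifier, E55 Type); extra parts such as
        # _2/_3, _r/_v and _FA look like local disambiguators (e.g. recto/verso).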
        urlplaceHolder = '<' + row['Url OPERA'] + '>'
        e36placeHolder = '<' + row['Url OPERA'] + '_E36>'
        e36e42placeHolder = '<' + row['Url OPERA'] + '_E36_E42>'
        e36placeHolder2 = '<' + row['Url OPERA'] + '_E36_2>'
        e36e42placeHolder2 = '<' + row['Url OPERA'] + '_E36_2_E42>'
        e36placeHolder3 = '<' + row['Url OPERA'] + '_E36_3>'
        e36e42placeHolder3 = '<' + row['Url OPERA'] + '_E36_3_E42>'
        e31placeHolder = '<' + row['Url OPERA'] + '_E31>'
        e3142placeHolder = '<' + row['Url OPERA'] + '_E31_E42>'
        e31e36rplaceHolder = '<' + row['Url OPERA'] + '_E31_E36_r>'
        e31e36re42placeHolder = '<' + row['Url OPERA'] + '_E31_E36_r_E42>'
        e31e36vplaceHolder = '<' + row['Url OPERA'] + '_E31_E36_v>'
        e31e36ve42placeHolder = '<' + row['Url OPERA'] + '_E31_E36_v_E42>'
        e31AplaceHolder = '<' + row['LINK ARCHIVE.org'] + '>'
        e31Ae42placeHolder = '<' + row['LINK ARCHIVE.org'] + '_E42>'
        e13placeHolder = '<' + row['Url OPERA'] + '_E13_FA>'
        e31Ae55placeHolder = '<' + row['AAT (Attribution)'] + '_E31_E55>'
        e12placeHolder = '<' + row['Url OPERA'] + '_E12>'
        e12FplaceHolder = '<' + row['Url OPERA'] + '_E12F>'
        e21ULANplaceHolder = '<' + row['ULAN'] + '>'

        if row['LINK SCHEDA AUT'] != '':
            e21AUTplaceHolder = '<' + row['LINK SCHEDA AUT'] + '>'
            # Link the local authority record to its ULAN and VIAF counterparts
            if row['ULAN'] != '':
                line = triple(e21AUTplaceHolder,
                              owlCoords.prefix + 'sameAs',
                              e21ULANplaceHolder) + closeLine
                output.write(line)
            if row['VIAF'] != '':
                e21VIAFplaceHolder = '<' + row['VIAF'] + '>'
                line = triple(e21AUTplaceHolder,
                              owlCoords.prefix + 'sameAs',
                              e21VIAFplaceHolder) + closeLine
                output.write(line)
        output.write('\n')

        # Limit number of entries processed (if desired)
        if ii > max_entries:
            break
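

# The TTL helpers above build triples by plain string concatenation, with
# continueLine1 / continueLine2 / closeLine supplying the ';', ',' and '.'
# terminators of Turtle predicate and object lists. A minimal sketch of how
# they compose (the subject URI and label below are made up for illustration,
# not taken from the MPP data):
def _demo_ttl_helpers():
    sample = (
        triple('<http://example.org/work/1>',
               nsCoords.prefix + 'type',
               cidocCoords.prefix + 'E22_Man-Made_Object') + continueLine1
        + doublet(schemaCoords.prefix + 'label', '"Example work"') + closeLine
    )
    # Prints:
    # <http://example.org/work/1> rdf:type crm:E22_Man-Made_Object ;
    #  rdfs:label "Example work" .
    print(sample)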