# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
# Json for the dictionary
import json

# OPTIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys

import_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'
export_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'


# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code


# Repositories.
# NOTE: the URIs here are the standard namespaces for these vocabularies;
# dev: is a project-specific namespace whose URI must be set before running.
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
tgnCoords = RDFcoords('<http://vocab.getty.edu/tgn/>', 'tgn:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')
devCoords = RDFcoords('', 'dev:')  # project-specific URI, fill in before use


# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    # Full triple: subject, predicate and object
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    # Shortened triple: predicate and object only (subject carried over by ';')
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    # Shortened triple: object only (subject and predicate carried over by ',')
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'


def writeTTLHeader(output):
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + tgnCoords.prefix + ' ' + tgnCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('@prefix ' + devCoords.prefix + ' ' + devCoords.uri + closeLine)
    output.write('\n')


file = "merge_luoghi_ASPO - comuni_microtoponimi_UNIQUE"
max_entries = 1000000000

with open(import_dir + file + '.csv', newline="", encoding='utf-8') as csv_file, open(
        export_dir + file + '.ttl', 'w', encoding='utf-8') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1
        # Placeholders
        devPlaceHolder_comune = devCoords.prefix + row['ID_RESTORE_comune']
        devPlaceHolder_provincia = devCoords.prefix + row['ID_PROVINCIA']
        if row['ID_RESTORE_microtoponimo'] != "":
            devPlaceHolder_microtoponimo = devCoords.prefix + row['ID_RESTORE_microtoponimo']
            # Link the microtoponym to its comune when that ID is present
            # (ignoring empty or whitespace-only cells); otherwise fall back
            # to the provincia, if available.
            if row['ID_RESTORE_comune'].strip() != "":
                line = triple(devPlaceHolder_microtoponimo,
                              cidocCoords.prefix + 'P89_falls_within',
                              devPlaceHolder_comune) + closeLine
                output.write(line)
            elif row['ID_PROVINCIA'].strip() != "":
                line = triple(devPlaceHolder_microtoponimo,
                              cidocCoords.prefix + 'P89_falls_within',
                              devPlaceHolder_provincia) + closeLine
                output.write(line)
        output.write('\n')
        # Limit number of entries processed (if desired)
        if ii > max_entries:
            break
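
# A minimal sketch of the intended output, assuming hypothetical row values
# (the column names are those read by the loop above; the IDs are invented
# purely for illustration):
#
#   row = {'ID_RESTORE_microtoponimo': 'MT001',
#          'ID_RESTORE_comune': 'CM042',
#          'ID_PROVINCIA': 'PR007'}
#
# would make the loop emit the TTL statement:
#
#   dev:MT001 crm:P89_falls_within dev:CM042 .
#
# If ID_RESTORE_comune were blank, the object would instead be dev:PR007.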