# Utilities to read/write csv files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered Dicts
from collections import OrderedDict
import json
# OPZIONAL IMPORTS
# For timestamping/simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command line utilities
import sys
# Json for the dictionary
import json
# Input/output folders for the CSV source and the generated TTL file.
# NOTE(review): both point to the same OneDrive directory and are
# machine-specific absolute paths — consider making them configurable.
import_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'
export_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'
# Custom class to store URIs + related infos for the ontologies/repositories
class RDFcoords:
    """Coordinates of one RDF namespace: its URI, TTL prefix and optional code.

    Instances are read directly via the ``uri`` / ``prefix`` / ``code``
    attributes by the TTL-writing helpers.
    """

    def __init__(self, uri, prefix, code=None):
        # Plain attribute container — no validation, mirrors the CSV-driven use.
        self.uri, self.prefix, self.code = uri, prefix, code
# Repositories
# Namespace coordinates for the ontologies/vocabularies used in the output.
# DEFECT FIXED: the URIs were all empty strings, so writeTTLHeader produced
# invalid '@prefix crm:  .' declarations.  The canonical namespace IRIs are
# filled in below (angle-bracketed, as required by the Turtle @prefix syntax).
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
tgnCoords = RDFcoords('<http://vocab.getty.edu/tgn/>', 'tgn:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')
# TODO(review): 'dev:' is a project-local namespace — its real IRI is not
# visible from this file; fill it in before publishing the TTL output.
devCoords = RDFcoords('', 'dev:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    """Return a full TTL statement body: 'subject predicate object'."""
    return ' '.join((subject, predicate, object1))
def doublet(predicate, object1):
    """Return a predicate+object fragment (leading space), for use after ';'."""
    return ' '.join(('', predicate, object1))
def singlet(object1):
    """Return an object-only fragment (leading space), for use after ','."""
    return ' ' + object1
# Line endings in TTL format
# ' ;' — statement continues with another predicate for the same subject
continueLine1 = ' ;\n'
# ' ,' — statement continues with another object for the same predicate
continueLine2 = ' ,\n'
# ' .' — terminates a complete triple
closeLine = ' .\n'
def writeTTLHeader(output):
    """Write one '@prefix prefix: uri .' line per known namespace, then a blank line.

    The declaration order matches the original hand-written sequence
    (crm, tgn, rdfs, rdf, owl, dev).
    """
    namespaces = (cidocCoords, tgnCoords, schemaCoords,
                  nsCoords, owlCoords, devCoords)
    for coords in namespaces:
        output.write('@prefix ' + coords.prefix + ' ' + coords.uri + closeLine)
    output.write('\n')
# --- Main conversion -------------------------------------------------------
# Reads the places CSV ("luoghi") and emits, per row, CIDOC-CRM E53_Place
# triples for buildings (edificio), parishes (parrocchia) and
# micro-micro-toponyms, each linked via P89_falls_within to the closest
# parent place whose id column is filled in.

def _filled(value):
    """Return True when *value* holds real content.

    DEFECT FIXED: the original guards read ``row[...] != "" and " "`` — the
    second operand is a constant truthy string, so the blank-space cells they
    were clearly meant to exclude were never skipped.  Trimming whitespace
    rejects '', ' ' and any other all-whitespace cell.
    """
    return value.strip() != ""

def _write_place(output, place_holder, label, place_id):
    """Write the rdf:type / rdfs:label / P1 identifier triples for one place.

    NOTE(review): *label* is embedded between bare double quotes without
    escaping, exactly as the original did — labels containing '"' would
    break the TTL; confirm against the CSV data.
    """
    output.write(triple(place_holder,
                        nsCoords.prefix + 'type',
                        cidocCoords.prefix + 'E53_Place') + closeLine)
    output.write(triple(place_holder,
                        schemaCoords.prefix + 'label',
                        '"' + label + '"') + closeLine)
    output.write(triple(place_holder,
                        cidocCoords.prefix + 'P1_is_identified_by',
                        '"' + place_id + '"') + closeLine)

def _link_first_parent(output, row, place_holder, parents):
    """Write a P89_falls_within triple to the first filled-in parent.

    *parents* is an ordered list of (csv_column, parent_placeholder) pairs;
    only the first match is linked, mirroring the original if/elif chains.
    """
    for column, parent in parents:
        if _filled(row[column]):
            output.write(triple(place_holder,
                                cidocCoords.prefix + 'P89_falls_within',
                                parent) + closeLine)
            break

file = "luoghi_ASPO_tutti_ID"
# Safety cap on the number of rows processed (effectively unlimited here).
max_entries = 1000000000

with open(import_dir + file + '.csv', newline="") as csv_file, open(
        export_dir + file + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    ii = 0
    for row in reader:
        # ii limits the number of processed entries for testing purposes.
        ii = ii + 1

        # dev: placeholder URIs for every place mentioned in the row.
        dev_provincia = devCoords.prefix + row['ID_PROVINCIA']
        dev_comune = devCoords.prefix + row['ID_RESTORE_comune']
        dev_microtoponimo = devCoords.prefix + row['ID_RESTORE_microtoponimo']
        dev_edificio = devCoords.prefix + row['ID_edificio']
        dev_parrocchia = devCoords.prefix + row['ID_parrocchia']
        dev_micromicro = devCoords.prefix + row['ID_micromicrotoponimo']

        if _filled(row['ID_edificio']):
            _write_place(output, dev_edificio,
                         row['EVENTO edificio upper'], row['ID_edificio'])
            _link_first_parent(output, row, dev_edificio,
                               [('ID_RESTORE_microtoponimo', dev_microtoponimo),
                                ('ID_RESTORE_comune', dev_comune),
                                ('ID_PROVINCIA', dev_provincia)])

        if _filled(row['ID_parrocchia']):
            _write_place(output, dev_parrocchia,
                         row['EVENTO PARROCCHIA upper'], row['ID_parrocchia'])
            _link_first_parent(output, row, dev_parrocchia,
                               [('ID_edificio', dev_edificio),
                                ('ID_RESTORE_microtoponimo', dev_microtoponimo),
                                ('ID_RESTORE_comune', dev_comune),
                                ('ID_PROVINCIA', dev_provincia)])

        if _filled(row['ID_micromicrotoponimo']):
            _write_place(output, dev_micromicro,
                         row['EVENTO micro microtoponimo'],
                         row['ID_micromicrotoponimo'])
            # A micro-micro-toponym may link BOTH to its parish AND to one
            # further parent: the original used a separate `if` for the
            # parish, then an independent if/elif chain for the others.
            _link_first_parent(output, row, dev_micromicro,
                               [('ID_parrocchia', dev_parrocchia)])
            _link_first_parent(output, row, dev_micromicro,
                               [('ID_edificio', dev_edificio),
                                ('ID_RESTORE_microtoponimo', dev_microtoponimo),
                                ('ID_RESTORE_comune', dev_comune),
                                ('ID_PROVINCIA', dev_provincia)])

        # Blank line between row groups for readability of the TTL file.
        output.write('\n')

        # Limit number of entries processed (if desired).
        if ii > max_entries:
            break