# Utilities to read/write CSV files
import csv
# Utilities to handle character encodings
import unicodedata
# Ordered dicts
from collections import OrderedDict
# JSON for the dictionary
import json
# String helpers (capwords, punctuation stripping)
import string

# OPTIONAL IMPORTS
# For timestamping / simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command-line utilities
import sys
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/TOPONIMI/CSV/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/TOPONIMI/RDF/'
# Custom class to store URIs + related info for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code
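
# A minimal usage sketch (the URI below is hypothetical; in this script the
# real namespace IRIs are left empty and must be filled in before running):
#   ovi = RDFcoords('<http://example.org/ovi/>', 'ovi:')
#   ovi.prefix + 'Firenze'  ->  'ovi:Firenze'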
# Repositories
# NB: the namespace URIs are left empty here; they must be set to the actual
# '<...>' IRIs for the emitted @prefix declarations to be valid TTL.
aspoCoords = RDFcoords('', 'dat:')
placeCoords = RDFcoords('', 'pl:')
oviCoords = RDFcoords('', 'ovi:')
cidocCoords = RDFcoords('', 'crm:')
tgnCoords = RDFcoords('', 'tgn:')
nsCoords = RDFcoords('', 'rdf:')
schemaCoords = RDFcoords('', 'rdfs:')
devCoords = RDFcoords('', 'dev:')
owlCoords = RDFcoords('', 'owl:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line

def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line

def singlet(object1):
    line = ' ' + object1
    return line
# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
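
# How the pieces compose (derived from the helpers above): a statement with
# two objects for the same predicate can be built as
#   triple(s, p, o1) + continueLine2 + singlet(o2) + closeLine
# which yields the TTL string
#   's p o1 ,\n o2 .\n'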
def writeTTLHeader(output):
    output.write('@prefix ' + placeCoords.prefix + ' ' + placeCoords.uri + closeLine)
    output.write('@prefix ' + aspoCoords.prefix + ' ' + aspoCoords.uri + closeLine)
    output.write('@prefix ' + oviCoords.prefix + ' ' + oviCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + tgnCoords.prefix + ' ' + tgnCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + devCoords.prefix + ' ' + devCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('\n')
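
# Each header line has the form '@prefix pl: <...> .' once the corresponding
# RDFcoords uri is filled in; with the empty URIs above it would emit
# '@prefix pl:  .', which TTL parsers reject.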
filePrefix = 'toponimi_'
fileType = 'OVI_ASPO'
max_entries = 1000000000
with open(import_dir + filePrefix + fileType + '.csv', newline='') as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True  # (set but never used in this script)
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1
        if row['ID ASPO'] != "":
            # Placeholders
            top = row['toponimo'].replace(" ", "_")
            toponimo = top.translate(str.maketrans('', '', string.punctuation))
            # NB: E73placeHolder is empty in the source; it must be set to the
            # URI of the E73 information object for the first triple below to
            # be valid TTL.
            E73placeHolder = ''
            topPlaceHolder = oviCoords.prefix + toponimo
            devPlaceHolder = devCoords.prefix + row['ID RESTORE']
            # Link the E73 information object to the toponym appellation
            line = triple(E73placeHolder,
                          cidocCoords.prefix + 'P67_refers_to',
                          topPlaceHolder) + closeLine
            output.write(line)
            line = triple(topPlaceHolder,
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E41_Appellation') + closeLine
            output.write(line)
            # Human-readable label, capitalised with string.capwords
            label = string.capwords(row['toponimo'])
            line = triple(topPlaceHolder,
                          schemaCoords.prefix + 'label',
                          '\"' + label + '\"') + closeLine
            output.write(line)
            line = triple(topPlaceHolder,
                          cidocCoords.prefix + 'P2_has_type',
                          '\"Toponimo\"') + closeLine
            output.write(line)
            line = triple(devPlaceHolder,
                          cidocCoords.prefix + 'P1_is_identified_by',
                          topPlaceHolder) + closeLine
            output.write(line)
            oviPlaceHolder = oviCoords.prefix + toponimo + "_E53_OVI"
            line = triple(oviPlaceHolder,
                          owlCoords.prefix + 'sameAs',
                          devPlaceHolder) + closeLine
            output.write(line)
            output.write('\n')
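
            # Schematically, for a row with toponimo 'san gimignano' and
            # ID RESTORE 'xyz' (hypothetical values), the block above emits,
            # besides the P67_refers_to line from the (empty) E73 placeholder:
            #   ovi:san_gimignano rdf:type crm:E41_Appellation .
            #   ovi:san_gimignano rdfs:label "San Gimignano" .
            #   ovi:san_gimignano crm:P2_has_type "Toponimo" .
            #   dev:xyz crm:P1_is_identified_by ovi:san_gimignano .
            #   ovi:san_gimignano_E53_OVI owl:sameAs dev:xyz .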
        # Limit the number of entries processed (if desired)
        if ii > max_entries:
            break