# Parser that converts the ASPO "Ospedale" onomastics CSV file into TTL format

# Utilities to read/write CSV files
import csv
# Unicode character database utilities
import unicodedata
# Ordered dictionaries
from collections import OrderedDict
import json
import re
# OPTIONAL IMPORTS
# For timestamping / simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command-line utilities
import sys

# Input and output directories
import_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/ASPO/CSV/ospedale/'
export_dir = '/Users/federicaspinelli/TEAMOVI/Parser/DATA/ASPO/RDF/ospedale/'

# Custom class to store URIs + related info for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code


# Repositories.
# The namespace URIs (given in angle brackets) are missing for the two project-specific
# vocabularies in this copy of the script and are left as placeholders to fill in;
# the standard vocabularies use their canonical URIs.
aspoCoords = RDFcoords('<>', 'aspo:')      # ASPO archival namespace: URI to be filled in
foafCoords = RDFcoords('<http://xmlns.com/foaf/0.1/>', 'foaf:')
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
schemaCoords = RDFcoords('<https://schema.org/>', 'schema:')
personCoords = RDFcoords('<>', 'person:')  # person ontology namespace: URI to be filled in
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
rdfsCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')

# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
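
# For reference, a call such as
#   triple(aspoCoords.prefix + 'IT-ASPO-XXXX', nsCoords.prefix + 'type', cidocCoords.prefix + 'E21_Person') + closeLine
# produces the TTL statement
#   aspo:IT-ASPO-XXXX rdf:type crm:E21_Person .
# ('IT-ASPO-XXXX' is only an illustrative record id.)
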
def writeTTLHeader(output):
    output.write('@prefix ' + aspoCoords.prefix + ' ' + aspoCoords.uri + closeLine)
    output.write('@prefix ' + foafCoords.prefix + ' ' + foafCoords.uri + closeLine)
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + personCoords.prefix + ' ' + personCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + rdfsCoords.prefix + ' ' + rdfsCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('\n')
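
# Each of the calls above emits one prefix declaration, e.g.
#   @prefix foaf: <http://xmlns.com/foaf/0.1/> .
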
filePrefix = 'OSPEDALE - onomastica '
fileType = '- persone singole'
max_entries = 10000000000000
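
# CSV columns read below: recordId, entityType, nameEntry@normal, nome proprio,
# nome di famiglia, Alias, genere, patronimico/matronimico, occupation, avo 1, avo 2,
# Qualifica, biogHist p, Variante, provenienza, recordID relazione, nome relazione.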
with open(import_dir + filePrefix + fileType + '.csv', newline="") as csv_file, open(
        export_dir + filePrefix + fileType + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)
    first = True
    ii = 0
    for row in reader:
        # The index ii is used to process a limited number of entries for testing purposes
        ii = ii + 1
        if row['entityType'] == 'person':
            id_aspo = row['recordId']
            # Prefixed URI for the person record
            aspoPlaceHolder = aspoCoords.prefix + id_aspo

            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E21_Person') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          personCoords.prefix + 'Person') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          nsCoords.prefix + 'type',
                          foafCoords.prefix + 'person') + closeLine
            output.write(line)

            # E42 identifier attached to the person
            line = triple(aspoPlaceHolder,
                          cidocCoords.prefix + 'P1_is_identified_by',
                          aspoPlaceHolder + "_E42") + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder + "_E42",
                          nsCoords.prefix + 'type',
                          cidocCoords.prefix + 'E42_Identifier') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder + "_E42",
                          rdfsCoords.prefix + 'label',
                          '\"' + id_aspo + '\"') + closeLine
            output.write(line)

            # Normalised name, used both as foaf:name and as rdfs:label
            line = triple(aspoPlaceHolder,
                          foafCoords.prefix + 'name',
                          '\"' + row['nameEntry@normal'] + '\"') + closeLine
            output.write(line)
            line = triple(aspoPlaceHolder,
                          rdfsCoords.prefix + 'label',
                          '\"' + row['nameEntry@normal'] + '\"') + closeLine
            output.write(line)

            if row['nome proprio'] != '':
                # Normalise whitespace in the given name (strip newlines and double spaces)
                txt = row['nome proprio']
                x = re.sub(" \n", "", txt)
                y = re.sub(r"\s\s", "", x)
                name = re.sub("\n", "", y)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'givenName',
                              '\"' + name + '\"') + closeLine
                output.write(line)

            if row['nome di famiglia'] != '':
                # Normalise whitespace in the family name
                txt = row['nome di famiglia']
                x = re.sub("\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'familyName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['Alias'] != '' and row['Alias'] != ' ':
                # Normalise whitespace in the alias
                txt = row['Alias']
                x = re.sub("\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              schemaCoords.prefix + 'alternateName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['genere'] != '':
                # Normalise whitespace in the gender field
                txt = row['genere']
                x = re.sub("\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              foafCoords.prefix + 'gender',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['patronimico/matronimico'] != '':
                # Normalise whitespace in the patronymic/matronymic
                txt = row['patronimico/matronimico']
                x = re.sub("\n", " ", txt)
                y = re.sub(r"\s\s", "", x)
                line = triple(aspoPlaceHolder,
                              personCoords.prefix + 'patronymicName',
                              '\"' + y + '\"') + closeLine
                output.write(line)

            if row['occupation'] != '' and row['occupation'] != ' ':
                occupazioni = []
                pipe = "|"
                if pipe in row['occupation']:
                    # Multiple occupations separated by '|'
                    occupazioni = row['occupation'].split('|')
                    for occupazione in occupazioni:
                        # Normalise whitespace and keep a letters-only key for the node name
                        txt = occupazione
                        x = re.sub("\n", " ", txt)
                        y = re.sub(r"\s\s", "", x)
                        occ = re.sub(r'[^A-Za-z]', '', y)
                        # Stand-in URI for the occupation node (the original expression is
                        # missing from this copy of the script)
                        occupationPlaceHolder = aspoCoords.prefix + occ
                        line = triple(aspoPlaceHolder,
                                      schemaCoords.prefix + 'hasOccupation',
                                      occupationPlaceHolder) + closeLine
                        output.write(line)
                        line = triple(occupationPlaceHolder,
                                      nsCoords.prefix + 'type',
                                      schemaCoords.prefix + 'Occupation') + closeLine
                        output.write(line)
                        line = triple(occupationPlaceHolder,
                                      rdfsCoords.prefix + 'label',
                                      '\"' + y + '\"') + closeLine
                        output.write(line)
                else:
                    # Single occupation
                    txt = row['occupation']
                    x = re.sub("\n", " ", txt)
                    y = re.sub(r"\s\s", "", x)
                    occ = re.sub(r'[^A-Za-z]', '', y)
                    # Stand-in URI for the occupation node (see note above)
                    occupationPlaceHolder = aspoCoords.prefix + occ
                    line = triple(aspoPlaceHolder,
                                  schemaCoords.prefix + 'hasOccupation',
                                  occupationPlaceHolder) + closeLine
                    output.write(line)
                    line = triple(occupationPlaceHolder,
                                  nsCoords.prefix + 'type',
                                  schemaCoords.prefix + 'Occupation') + closeLine
                    output.write(line)
                    line = triple(occupationPlaceHolder,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + y + '\"') + closeLine
                    output.write(line)

            if row['avo 1'] != '':
                id = row['avo 1']
                # E13 attribute assignment linking the person to the second-degree ancestor.
                # Stand-in URI (the original expression is missing from this copy of the script).
                E13placeHolder = aspoPlaceHolder + "_E13_avo1"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['avo 1'] + ' avo di secondo grado di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    # The ancestor already has an ASPO record: reuse its identifier (stand-in URI)
                    relazioneid = aspoCoords.prefix + id
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    # The ancestor is only known by name: mint a local node (stand-in URI)
                    relazionenoid = aspoPlaceHolder + "_avo1_persona"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['avo 1'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                # E55 type of the relation (stand-in URI)
                E55placeHolder = aspoCoords.prefix + "avo_secondo_grado"
                line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                output.write(line)
                line = triple(E55placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Avo di secondo grado\"') + closeLine
                output.write(line)

            if row['avo 2'] != '':
                id = row['avo 2']
                # E13 attribute assignment linking the person to the third-degree ancestor.
                # Stand-in URI (the original expression is missing from this copy of the script).
                E13placeHolder = aspoPlaceHolder + "_E13_avo2"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['avo 2'] + ' avo di terzo grado di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    # The ancestor already has an ASPO record: reuse its identifier (stand-in URI)
                    relazioneid = aspoCoords.prefix + id
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    # The ancestor is only known by name: mint a local node (stand-in URI)
                    relazionenoid = aspoPlaceHolder + "_avo2_persona"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['avo 2'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                # E55 type of the relation (stand-in URI)
                E55placeHolder = aspoCoords.prefix + "avo_terzo_grado"
                line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                output.write(line)
                line = triple(E55placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Avo di terzo grado\"') + closeLine
                output.write(line)

            if row['Qualifica'] != '':
                qualifiche = []
                pipe = "|"
                if pipe in row['Qualifica']:
                    # Multiple honorifics separated by '|'
                    qualifiche = row['Qualifica'].split('|')
                    for qualifica in qualifiche:
                        # Normalise whitespace
                        txt = qualifica
                        x = re.sub("\n", " ", txt)
                        y = re.sub(r"\s\s", " ", x)
                        line = triple(aspoPlaceHolder, schemaCoords.prefix + 'honorificPrefix', '\"' + str(y) + '\"') + closeLine
                        output.write(line)
                else:
                    # Single honorific
                    txt = row['Qualifica']
                    x = re.sub("\n", " ", txt)
                    y = re.sub(r"\s\s", " ", x)
                    line = triple(aspoPlaceHolder, schemaCoords.prefix + 'honorificPrefix', '\"' + y + '\"') + closeLine
                    output.write(line)

            if row['biogHist p'] != '':
                # Normalise whitespace and strip double quotes from the biographical note
                txt = row['biogHist p']
                x = re.sub("\n", " ", txt)
                y = re.sub(r"\s\s", " ", x)
                note = re.sub("\"", "", y)
                line = triple(aspoPlaceHolder,
                              cidocCoords.prefix + 'P3_has_note',
                              '\"' + note + '\"') + closeLine
                output.write(line)

            if row['Variante'] != '':
                varianti = []
                pipe = "|"
                if pipe in row['Variante']:
                    # Multiple variant records separated by '|': link each with owl:sameAs
                    varianti = row['Variante'].split('|')
                    for variante in varianti:
                        line = triple(aspoPlaceHolder,
                                      owlCoords.prefix + 'sameAs',
                                      aspoCoords.prefix + str(variante)) + closeLine
                        output.write(line)
                else:
                    line = triple(aspoPlaceHolder,
                                  owlCoords.prefix + 'sameAs',
                                  aspoCoords.prefix + row['Variante']) + closeLine
                    output.write(line)

            if row['provenienza'] != '':
                # E53 place of provenance. Stand-in URI (the original expression is missing
                # from this copy of the script).
                e53placeHolder = aspoPlaceHolder + "_E53"
                line = triple(aspoPlaceHolder,
                              cidocCoords.prefix + 'P74_has_current_or_former_residence',
                              e53placeHolder) + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E53_Place') + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"' + row['provenienza'] + '\"') + closeLine
                output.write(line)
                line = triple(e53placeHolder,
                              cidocCoords.prefix + 'P2_has_type',
                              '\"Provenienza\"') + closeLine
                output.write(line)

            if row['recordID relazione'] != '':
                id = row['recordID relazione']
                # E13 attribute assignment for a generic relation between two records.
                # Stand-in URI (the original expression is missing from this copy of the script).
                E13placeHolder = aspoPlaceHolder + "_E13_relazione"
                line = triple(E13placeHolder,
                              nsCoords.prefix + 'type',
                              cidocCoords.prefix + 'E13_Attribute_Assignment') + closeLine
                output.write(line)
                line = triple(E13placeHolder, cidocCoords.prefix + 'P141_assigned', aspoPlaceHolder) + closeLine
                output.write(line)
                line = triple(E13placeHolder,
                              rdfsCoords.prefix + 'label',
                              '\"Relazione: ' + row['recordID relazione'] + ' ' + row['nome relazione'] + ' di ' + row['recordId'] + '\"') + closeLine
                output.write(line)
                if re.match(r'IT-ASPO', id):
                    # The related person already has an ASPO record: reuse its identifier (stand-in URI)
                    relazioneid = aspoCoords.prefix + id
                    line = triple(relazioneid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                else:
                    # The related person is only known by name: mint a local node (stand-in URI)
                    relazionenoid = aspoPlaceHolder + "_relazione_persona"
                    line = triple(relazionenoid, cidocCoords.prefix + 'P141_assigned', E13placeHolder) + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  rdfsCoords.prefix + 'label',
                                  '\"' + row['recordID relazione'] + '\"') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  cidocCoords.prefix + 'E21_Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  personCoords.prefix + 'Person') + closeLine
                    output.write(line)
                    line = triple(relazionenoid,
                                  nsCoords.prefix + 'type',
                                  foafCoords.prefix + 'person') + closeLine
                    output.write(line)
                if row['nome relazione'] != '':
                    relazioni = []
                    pipe = "|"
                    if pipe in row['nome relazione']:
                        # Multiple relation types separated by '|'
                        relazioni = row['nome relazione'].split('|')
                        for relazione in relazioni:
                            # Normalise whitespace and keep a letters-only key for the node name
                            txt = relazione
                            x = re.sub("\n", " ", txt)
                            y = re.sub(r"\s\s", "", x)
                            rel = re.sub(r'[^A-Za-z]', '', y)
                            cleanlabel = rel.strip()
                            # Stand-in URI for the E55 relation type
                            E55placeHolder = aspoCoords.prefix + rel
                            line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                            output.write(line)
                            line = triple(E55placeHolder,
                                          rdfsCoords.prefix + 'label',
                                          '\"' + cleanlabel + '\"') + closeLine
                            output.write(line)
                    else:
                        # Single relation type
                        cleanlabel = row['nome relazione'].strip()
                        # Stand-in URI for the E55 relation type
                        E55placeHolder = aspoCoords.prefix + re.sub(r'[^A-Za-z]', '', cleanlabel)
                        line = triple(E13placeHolder, cidocCoords.prefix + 'P42_assigned', E55placeHolder) + closeLine
                        output.write(line)
                        line = triple(E55placeHolder,
                                      rdfsCoords.prefix + 'label',
                                      '\"' + cleanlabel + '\"') + closeLine
                        output.write(line)

            # Blank line between person records in the output TTL
            output.write('\n')

        # Limit the number of entries processed (if desired)
        if ii > max_entries:
            break