# Utilities to read/write CSV files
import csv
# Utilities to inspect Unicode character data
import unicodedata
# Ordered dictionaries
from collections import OrderedDict
# JSON (de)serialization for the dictionary
import json

# OPTIONAL IMPORTS
# For timestamping / simple speed tests
from datetime import datetime
# Random number generator
from random import *
# System & command-line utilities
import sys

import_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'
export_dir = '/Users/leonardocanova/Library/CloudStorage/OneDrive-UniversityofPisa(1)/Documenti/Progetti università/OVI/Programmazione/ASPO/Luoghi/'
# Custom class to store URIs + related info for the ontologies/repositories
class RDFcoords:
    def __init__(self, uri, prefix, code=None):
        self.uri = uri
        self.prefix = prefix
        self.code = code
# Repositories
cidocCoords = RDFcoords('<http://www.cidoc-crm.org/cidoc-crm/>', 'crm:')
tgnCoords = RDFcoords('<http://vocab.getty.edu/tgn/>', 'tgn:')
nsCoords = RDFcoords('<http://www.w3.org/1999/02/22-rdf-syntax-ns#>', 'rdf:')
schemaCoords = RDFcoords('<http://www.w3.org/2000/01/rdf-schema#>', 'rdfs:')
owlCoords = RDFcoords('<http://www.w3.org/2002/07/owl#>', 'owl:')
devCoords = RDFcoords('<http://dev.restore.ovi.cnr.it/vocabularies/places/>', 'dev:')
# Basic functions for triples / shortened triples in TTL format
def triple(subject, predicate, object1):
    line = subject + ' ' + predicate + ' ' + object1
    return line


def doublet(predicate, object1):
    line = ' ' + predicate + ' ' + object1
    return line


def singlet(object1):
    line = ' ' + object1
    return line


# Line endings in TTL format
continueLine1 = ' ;\n'
continueLine2 = ' ,\n'
closeLine = ' .\n'
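
# A minimal sanity check (illustrative, not part of the original pipeline):
# a full triple plus closeLine forms one complete Turtle statement. The
# subject/object identifiers used here are hypothetical examples.
assert triple('dev:0001', 'crm:P89_falls_within', 'dev:PI') + closeLine == \
    'dev:0001 crm:P89_falls_within dev:PI .\n'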
def writeTTLHeader(output):
    output.write('@prefix ' + cidocCoords.prefix + ' ' + cidocCoords.uri + closeLine)
    output.write('@prefix ' + tgnCoords.prefix + ' ' + tgnCoords.uri + closeLine)
    output.write('@prefix ' + schemaCoords.prefix + ' ' + schemaCoords.uri + closeLine)
    output.write('@prefix ' + nsCoords.prefix + ' ' + nsCoords.uri + closeLine)
    output.write('@prefix ' + owlCoords.prefix + ' ' + owlCoords.uri + closeLine)
    output.write('@prefix ' + devCoords.prefix + ' ' + devCoords.uri + closeLine)
    output.write('\n')
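
# Quick illustration of the header output (a sketch, using only names defined
# above): writing to an in-memory buffer shows one '@prefix' declaration per
# namespace.
import io
_demo = io.StringIO()
writeTTLHeader(_demo)
# _demo.getvalue() now begins with:
# @prefix crm: <http://www.cidoc-crm.org/cidoc-crm/> .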
file = "merge_luoghi_ASPO - province_comuni_UNIQUE"
max_entries = 1000000000

with open(import_dir + file + '.csv', newline="") as csv_file, open(
        export_dir + file + '.ttl', 'w') as output:
    reader = csv.DictReader(csv_file)
    writeTTLHeader(output)

    ii = 0
    for row in reader:
        # The index ii counts processed rows, so a limited number of entries
        # can be processed for testing purposes
        ii += 1

        # Placeholders
        devPlaceHolder = devCoords.prefix + row['ID_PROVINCIA']

        # Link each municipality to its province via CIDOC CRM P89_falls_within
        if row['ID_RESTORE_comune'] != "":
            devPlaceHolder_comune = devCoords.prefix + row['ID_RESTORE_comune']
            line = triple(devPlaceHolder_comune,
                          cidocCoords.prefix + 'P89_falls_within',
                          devPlaceHolder) + closeLine
            output.write(line)
            output.write('\n')

        # Limit the number of entries processed (if desired)
        if ii > max_entries:
            break
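
# Resulting file shape (illustrative, with hypothetical IDs): after the
# @prefix header, the .ttl file contains one statement per municipality row:
#   dev:0001 crm:P89_falls_within dev:PI .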