# ontology_parser.py

# %%
import csv
import json
# BASIC CONFIGURATION
DATA_FOLDER = './data/'
ONTO_FILENAME = 'man_draft'  # No extension!
ent_filename = ONTO_FILENAME + '_entities.csv'
rel_filename = ONTO_FILENAME + '_relations.csv'
# %%
# PART I: parse xlsx to (multiple) csv
# CONFIGURATION
XLSX_FILENAME = ONTO_FILENAME + '.xlsx'
ENTITIES_SHEETNAME = 'Entità'
RELATIONS_SHEETNAME = 'Relazioni'
# %%
# Import xlsx through openpyxl
import openpyxl as op
input_data = op.load_workbook(DATA_FOLDER + XLSX_FILENAME)  # No encoding argument needed: xlsx is zipped XML and openpyxl handles it internally
entities_sheet = input_data[ENTITIES_SHEETNAME]
relations_sheet = input_data[RELATIONS_SHEETNAME]
# %%
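# Worksheet.values yields one tuple of cell values per row, so the sheets can be
# streamed straight into csv.writerows() below.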
# Export sheet data to csv
# newline='' prevents csv.writer from emitting blank rows on Windows
with open(DATA_FOLDER + ent_filename, 'w', newline='', encoding='utf-8') as out_file:
    writer = csv.writer(out_file)
    writer.writerows(entities_sheet.values)
with open(DATA_FOLDER + rel_filename, 'w', newline='', encoding='utf-8') as out_file:
    writer = csv.writer(out_file)
    writer.writerows(relations_sheet.values)
# %%
# PART II: collect csv data into a 'pre-ontology' structure
# Read csv files back in (or use them directly as starting points)
HEADER_ROW = True
# Not difficult to add more keys (column names)
ENTITIES_COLUMN_LABEL = 'ENTITÀ'
ATTRIBUTES_COLUMN_LABEL = 'ATTRIBUTO (LITERAL)'
SAMEAS_COLUMN_LABEL = 'SAME AS'
#
RELATION_FIRST_COLUMN_LABEL = 'ENTITÀ 1'
RELATION_SECOND_COLUMN_LABEL = 'ENTITÀ 2'
RELATION_NAME_COLUMN_LABEL = 'NOME RELAZIONE'
INVERSE_RELATION_COLUMN_LABEL = 'NOME RELAZIONE INVERSA'
#
with open(DATA_FOLDER + ent_filename, 'r', encoding='utf-8') as in_file:
    if HEADER_ROW:
        reader = csv.DictReader(in_file)
    else:
        # No header row: supply the entity column labels explicitly
        reader = csv.DictReader(in_file, fieldnames=[ENTITIES_COLUMN_LABEL, ATTRIBUTES_COLUMN_LABEL, SAMEAS_COLUMN_LABEL])
    entities = [row for row in reader]
with open(DATA_FOLDER + rel_filename, 'r', encoding='utf-8') as in_file:
    if HEADER_ROW:
        reader = csv.DictReader(in_file)
    else:
        # No header row: supply the relation column labels explicitly
        reader = csv.DictReader(in_file, fieldnames=[RELATION_FIRST_COLUMN_LABEL, RELATION_SECOND_COLUMN_LABEL, RELATION_NAME_COLUMN_LABEL, INVERSE_RELATION_COLUMN_LABEL])
    relations = [row for row in reader]
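# Each row is a dict keyed by the column labels above, e.g. (hypothetical values):
#   {'ENTITÀ': 'Documento', 'ATTRIBUTO (LITERAL)': 'titolo', 'SAME AS': 'crm:E31_Document'}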
# %%
# From here on, work with the 'entities' and 'relations' lists of dicts.
# Arrange them in a nested structure, for convenience
def dict_lists_to_json(entities_local, relations_local):
    """Arrange the flat csv rows into a nested 'pre-ontology' dict (see the example below)."""
    entity = {}
    current_entity = None
    for row in entities_local:
        entity_name = row.get(ENTITIES_COLUMN_LABEL)
        attribute_name = row.get(ATTRIBUTES_COLUMN_LABEL)
        same_as_row = row.get(SAMEAS_COLUMN_LABEL)
        same_as_list = same_as_row.split(',') if same_as_row else []
        # A filled entity column starts a new entity; the following rows belong to it
        if entity_name:
            current_entity = entity_name
            entity[current_entity] = {}
        if current_entity and attribute_name:
            if not entity[current_entity].get('Attributi'):
                entity[current_entity]['Attributi'] = []
            entity[current_entity]['Attributi'].append(attribute_name)
        if current_entity and same_as_list:
            entity[current_entity]['Sinonimi'] = [s.strip() for s in same_as_list]
    # Add subclass information
    for row in relations_local:
        entity1 = row.get(RELATION_FIRST_COLUMN_LABEL)
        entity2 = row.get(RELATION_SECOND_COLUMN_LABEL)
        label = row.get(RELATION_NAME_COLUMN_LABEL)
        if label == "is_subclass_of":
            if entity1 in entity:
                entity[entity1]["Sottoclasse di"] = entity2
    # Construct relations (everything that is not a subclass axiom)
    entity_relations = []
    for row in relations_local:
        if row[RELATION_NAME_COLUMN_LABEL] != "is_subclass_of":
            relation = {
                "Entità 1": row[RELATION_FIRST_COLUMN_LABEL],
                "Entità 2": row[RELATION_SECOND_COLUMN_LABEL],
                "Etichetta": row[RELATION_NAME_COLUMN_LABEL],
                "Inversa": row[INVERSE_RELATION_COLUMN_LABEL]
            }
            entity_relations.append(relation)
    # Create final JSON structure
    data = {
        "Entità": entity,
        "Relazioni": entity_relations
    }
    return data
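# Illustration only (hypothetical names, not taken from the actual spreadsheet):
# dict_lists_to_json() returns a structure shaped like
# {
#     "Entità": {
#         "Documento": {
#             "Attributi": ["titolo", "data"],
#             "Sinonimi": ["crm:E31_Document"],
#             "Sottoclasse di": "Risorsa"
#         }
#     },
#     "Relazioni": [
#         {"Entità 1": "Persona", "Entità 2": "Documento",
#          "Etichetta": "is_author_of", "Inversa": "has_author"}
#     ]
# }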
# %%
json_data = dict_lists_to_json(entities, relations)
# Export data
# utf-8 is needed because ensure_ascii=False writes accented characters as-is
with open(DATA_FOLDER + ONTO_FILENAME + '.json', 'w', encoding='utf-8') as out_json:
    json.dump(json_data, out_json, indent=2, ensure_ascii=False)
# %%
# Consistency check: every entity referenced in a relation must also be declared as an entity
entity_set = set(json_data['Entità'].keys())
entity_relations_set = {ent for rel in json_data['Relazioni'] for ent in [rel['Entità 1'], rel['Entità 2']]}
# The check
if not entity_relations_set.issubset(entity_set):
    print(entity_relations_set.difference(entity_set))
# Note on #any
# %%
# RDF Templates
with open('./template.rdf', 'r', encoding='utf-8') as in_file:
    RAW_RDF = in_file.read()
ENTITY_TEMPLATE = '''
<!-- http://www.h2iosc.it/onto##NAME# -->
<owl:Class rdf:about="&h2iosc;#NAME#">
    <rdfs:label>#LABEL#</rdfs:label>
    <rdfs:subClassOf>#PARENT#</rdfs:subClassOf>
</owl:Class>
'''
SUBCLASS_STRING = "    <rdfs:subClassOf>#PARENT#</rdfs:subClassOf>\n"
OBJECT_PROPERTY_TEMPLATE = '''
<!-- http://www.h2iosc.it/onto##NAME# -->
<owl:ObjectProperty rdf:about="&h2iosc;#NAME#">
    <rdfs:label>#LABEL#</rdfs:label>
    <rdfs:range rdf:resource="&h2iosc;#RANGE#"/>
    <rdfs:domain rdf:resource="&h2iosc;#DOMAIN#"/>
</owl:ObjectProperty>
'''
OBJECT_PROPERTY_INVERSE_TEMPLATE = '''
<!-- http://www.h2iosc.it/onto##NAME# -->
<owl:ObjectProperty rdf:about="&h2iosc;#NAME#">
    <rdfs:label>#LABEL#</rdfs:label>
    <owl:inverseOf rdf:resource="&h2iosc;#INV#"/>
    <rdfs:range rdf:resource="&h2iosc;#RANGE#"/>
    <rdfs:domain rdf:resource="&h2iosc;#DOMAIN#"/>
</owl:ObjectProperty>
'''
DATATYPE_PROPERTY_TEMPLATE = '''
<!-- http://www.h2iosc.it/onto##NAME# -->
<owl:DatatypeProperty rdf:about="&h2iosc;#NAME#">
    <rdfs:label>#LABEL#</rdfs:label>
    <rdfs:domain rdf:resource="&h2iosc;#DOMAIN#"/>
</owl:DatatypeProperty>
'''
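# Note (based on how create_rdf() works below): the output is produced by literal
# string replacement, so template.rdf is expected to contain ENTITY_TEMPLATE,
# DATATYPE_PROPERTY_TEMPLATE and OBJECT_PROPERTY_INVERSE_TEMPLATE verbatim
# (indentation included) as placeholders, and SUBCLASS_STRING must match the
# corresponding line of ENTITY_TEMPLATE exactly.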
# Utility
def normalize_label(label):
    """Lowercase a label, replace spaces with underscores and strip Italian accents."""
    return (label.lower().replace(' ', '_')
            .replace('à', 'a').replace('è', 'e').replace('é', 'e')
            .replace('ì', 'i').replace('ò', 'o').replace('ù', 'u'))
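# For example (hypothetical labels, not necessarily in the spreadsheet):
#   normalize_label('Unità di misura')  ->  'unita_di_misura'
#   normalize_label('Attività')         ->  'attivita'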
# %%
# CREATE RDF OUTPUT
def create_rdf(data):
    entities_rdf_list = []
    datatype_properties_rdf_list = []
    for label, ent in data['Entità'].items():
        entity_name = normalize_label(label)
        entity_rdf = ENTITY_TEMPLATE.replace('#LABEL#', label).replace('#NAME#', entity_name)
        # Subclasses
        if 'Sottoclasse di' in ent.keys():
            parent = ent['Sottoclasse di']
            data['Relazioni'].append({"Entità 1": label,
                                      "Entità 2": parent,
                                      "Etichetta": "is_subclass_of",
                                      "Inversa": "is_superclass_of"})
            entity_rdf = entity_rdf.replace('#PARENT#', normalize_label(parent))
        else:
            entity_rdf = entity_rdf.replace(SUBCLASS_STRING, '')
        entities_rdf_list.append(entity_rdf)
        if not ent.get('Attributi'):
            continue
        for datatype_label in ent['Attributi']:
            datatype_name = normalize_label(datatype_label)
            datatype_properties_rdf_list.append(
                DATATYPE_PROPERTY_TEMPLATE.replace('#LABEL#', datatype_label)
                                          .replace('#NAME#', datatype_name)
                                          .replace('#DOMAIN#', entity_name)
            )
    relations_rdf_list = []
    for rel in data['Relazioni']:
        label = rel['Etichetta']
        inverse_label = rel['Inversa']
        domain = normalize_label(rel['Entità 1'])
        range1 = normalize_label(rel['Entità 2'])
        name = domain + '_' + normalize_label(label) + '_' + range1
        inverse_name = range1 + '_' + normalize_label(inverse_label) + '_' + domain
        #
        relation_rdf = OBJECT_PROPERTY_TEMPLATE.replace('#NAME#', name).replace('#LABEL#', label).replace('#DOMAIN#', domain).replace('#RANGE#', range1)
        #
        relation_inverse_rdf = OBJECT_PROPERTY_INVERSE_TEMPLATE.replace('#NAME#', inverse_name).replace('#LABEL#', inverse_label).replace('#DOMAIN#', range1).replace('#RANGE#', domain).replace('#INV#', name)
        #
        relation_full_rdf = relation_rdf + '\n\n\n' + relation_inverse_rdf
        relations_rdf_list.append(relation_full_rdf)
    to_out = RAW_RDF.replace(ENTITY_TEMPLATE, '\n\n\n'.join(entities_rdf_list)).replace(
        DATATYPE_PROPERTY_TEMPLATE, '\n\n\n'.join(datatype_properties_rdf_list)
    ).replace(OBJECT_PROPERTY_INVERSE_TEMPLATE, '\n\n\n'.join(relations_rdf_list))
    return to_out
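# Illustration only (hypothetical relation, not necessarily in the spreadsheet):
# a row ('Persona', 'Documento', 'is_author_of', 'has_author') would generate an
# owl:ObjectProperty named 'persona_is_author_of_documento' (domain 'persona',
# range 'documento') plus its inverse 'documento_has_author_persona'.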
# %%
rdf_data = create_rdf(json_data)
# Export
with open(DATA_FOLDER + ONTO_FILENAME + '.rdf', 'w', encoding='utf-8') as out_file:
    out_file.write(rdf_data)
# %%
# The resulting RDF can be visualized at https://service.tib.eu/webvowl/
# %%