@@ -129,7 +129,7 @@ def generalize_tsv(tsvfile, output, class_name, schema_name, pandera: bool, anno

    Example:

-        schemauto generalize-tsv --class-name Person --schema-name PersonInfo my/data/persons.tsv
+        ``schemauto generalize-tsv --class-name Person --schema-name PersonInfo my/data/persons.tsv``
    """
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    if pandera:
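
Note on the last two context lines above: click passes every option to the command, using None for options the user did not supply, and the dict comprehension keeps only the explicitly set ones, presumably so that the generalizer's own defaults are not overridden. A minimal, self-contained sketch of that pattern (the option names are hypothetical):

    # Hypothetical option dict as click would pass it; None means "not set by the user".
    options = {"column_separator": "\t", "enum_threshold": None, "downcase_header": None}

    # Keep only the options that were explicitly provided on the command line.
    options = {k: v for k, v in options.items() if v is not None}

    assert options == {"column_separator": "\t"}
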
@@ -161,11 +161,11 @@ def generalize_tsvs(tsvfiles, output, schema_name, **kwargs):

    See :ref:`generalizers` for more on the generalization framework

-    This uses :ref:`CsvDataGeneralizer.convert_multiple`
+    This uses CsvDataGeneralizer.convert_multiple

    Example:

-        schemauto generalize-tsvs --class-name Person --schema-name PersonInfo my/data/*.tsv
+        ``schemauto generalize-tsvs --class-name Person --schema-name PersonInfo my/data/*.tsv``
    """
    ie = CsvDataGeneralizer(**kwargs)
    schema = ie.convert_multiple(tsvfiles, schema_name=schema_name)
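
For reference, the same generalization can be driven from Python. A minimal sketch, assuming the import paths shown (the import block of cli.py is not part of this diff) and using linkml-runtime's yaml_dumper for serialization:

    from linkml_runtime.dumpers import yaml_dumper
    from schema_automator.generalizers.csv_data_generalizer import CsvDataGeneralizer  # assumed path

    # Generalize several TSVs into a single schema, mirroring `schemauto generalize-tsvs`.
    ie = CsvDataGeneralizer()
    schema = ie.convert_multiple(
        ["my/data/persons.tsv", "my/data/pets.tsv"],  # hypothetical input files
        schema_name="PersonInfo",
    )
    print(yaml_dumper.dumps(schema))  # emit the induced schema as LinkML YAML
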
@@ -229,7 +229,7 @@ def import_dosdps(dpfiles, output, **args):

    Example:

-        schemauto import-dosdps --range-as-enums patterns/*yaml -o my-schema.yaml
+        ``schemauto import-dosdps --range-as-enums patterns/*.yaml -o my-schema.yaml``
    """
    ie = DOSDPImportEngine()
    schema = ie.convert(dpfiles, **args)
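
A comparable programmatic sketch for the DOSDP path. The keyword name range_as_enums is inferred from the --range-as-enums flag and, like the import path, is an assumption to check against the actual DOSDPImportEngine API:

    from glob import glob

    from linkml_runtime.dumpers import yaml_dumper
    from schema_automator.importers.dosdp_import_engine import DOSDPImportEngine  # assumed path

    ie = DOSDPImportEngine()
    # range_as_enums=True mirrors the CLI flag above; the keyword name is an assumption.
    schema = ie.convert(glob("patterns/*.yaml"), range_as_enums=True)
    yaml_dumper.dump(schema, "my-schema.yaml")
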
@@ -309,7 +309,7 @@ def generalize_json(input, output, schema_name, depluralize: bool, format, omit_

    Example:

-        schemauto generalize-json my/data/persons.json -o my.yaml
+        ``schemauto generalize-json my/data/persons.json -o my.yaml``
    """
    ie = JsonDataGeneralizer(omit_null=omit_null, depluralize_class_names=depluralize)
    if inlined_map:
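
The constructor call above takes two of the command's flags directly. A sketch of the equivalent direct call, with the import path assumed and the behaviour of the two flags paraphrased from their names:

    from linkml_runtime.dumpers import yaml_dumper
    from schema_automator.generalizers.json_instance_generalizer import JsonDataGeneralizer  # assumed path

    # omit_null: skip null-valued keys; depluralize_class_names: singularize induced
    # class names (both descriptions paraphrased from the flag names, not verified).
    ie = JsonDataGeneralizer(omit_null=True, depluralize_class_names=True)
    schema = ie.convert("my/data/persons.json", format="json")  # format value assumed
    yaml_dumper.dump(schema, "my.yaml")
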
@@ -336,7 +336,7 @@ def generalize_toml(input, output, schema_name, omit_null, **kwargs):

    Example:

-        schemauto generalize-toml my/data/conf.toml -o my.yaml
+        ``schemauto generalize-toml my/data/conf.toml -o my.yaml``
    """
    ie = JsonDataGeneralizer(omit_null=omit_null)
    schema = ie.convert(input, format='toml', **kwargs)
@@ -365,7 +365,7 @@ def import_json_schema(input, output, import_project: bool, schema_name, format,

    Example:

-        schemauto import-json-schema my/schema/personinfo.schema.json
+        ``schemauto import-json-schema my/schema/personinfo.schema.json``
    """
    ie = JsonSchemaImportEngine(**kwargs)
    if not import_project:
@@ -390,7 +390,7 @@ def import_kwalify(input, output, schema_name, **kwargs):

    Example:

-        schemauto import-kwalify my/schema/personinfo.kwalify.yaml
+        ``schemauto import-kwalify my/schema/personinfo.kwalify.yaml``
    """
    ie = KwalifyImportEngine(**kwargs)
    schema = ie.convert(input, output, name=schema_name, format=format)
@@ -409,7 +409,7 @@ def import_frictionless(input, output, schema_name, schema_id, **kwargs):

    Example:

-        schemauto import-frictionless cfde.package.json
+        ``schemauto import-frictionless cfde.package.json``
    """
    ie = FrictionlessImportEngine(**kwargs)
    schema = ie.convert(input, name=schema_name, id=schema_id)
@@ -429,7 +429,7 @@ def import_cadsr(input, output, schema_name, schema_id, **kwargs):

    Example:

-        schemauto import-cadsr "cdes/*.json"
+        ``schemauto import-cadsr "cdes/*.json"``
    """
    ie = CADSRImportEngine()
    paths = [str(gf.absolute()) for gf in Path().glob(input) if gf.is_file()]
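
Note that the quoted glob in the example is expanded by the command itself (the last context line above) rather than by the shell. The same expansion as a standalone snippet:

    from pathlib import Path

    # Expand a quoted glob relative to the current directory, keeping regular files only.
    paths = [str(gf.absolute()) for gf in Path().glob("cdes/*.json") if gf.is_file()]
    print(paths)
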
@@ -460,7 +460,7 @@ def import_owl(owlfile, output, **args):

    Example:

-        schemauto import-owl prov.ofn -o my.yaml
+        ``schemauto import-owl prov.ofn -o my.yaml``
    """
    sie = OwlImportEngine()
    schema = sie.convert(owlfile, **args)
@@ -509,7 +509,7 @@ def generalize_rdf(rdffile, dir, output, **args):

    Example:

-        schemauto generalize-rdf my/data/persons.ttl
+        ``schemauto generalize-rdf my/data/persons.ttl``
    """
    sie = RdfDataGeneralizer()
    if not os.path.exists(dir):
@@ -539,13 +539,13 @@ def annotate_schema(schema: str, input: str, output: str, **kwargs):

    Example:

-        schemauto annotate-schema -i bioportal: my-schema.yaml -o annotated.yaml
+        ``schemauto annotate-schema -i bioportal: my-schema.yaml -o annotated.yaml``

    This will require you to set the API key via OAK - see the OAK docs.

    You can specify a specific ontology

-        schemauto annotate-schema -i bioportal:ncbitaxon my-schema.yaml -o annotated.yaml
+        ``schemauto annotate-schema -i bioportal:ncbitaxon my-schema.yaml -o annotated.yaml``

    In future OAK will support a much wider variety of annotators including:

@@ -594,13 +594,13 @@ def enrich_using_ontology(schema: str, input: str, output: str, annotate: bool,

    Example:

-        schemauto enrich-using-ontology -i bioportal: my-schema.yaml -o my-enriched.yaml
+        ``schemauto enrich-using-ontology -i bioportal: my-schema.yaml -o my-enriched.yaml``

    If your schema has no mappings you can use --annotate to add them

    Example:

-        schemauto enrich-using-ontology -i so.obo --annotate my-schema.yaml -o my-enriched.yaml --annotate
+        ``schemauto enrich-using-ontology -i so.obo --annotate my-schema.yaml -o my-enriched.yaml --annotate``
    """
    impl = get_implementation_from_shorthand(input)
    annr = SchemaAnnotator(impl)
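
The last two context lines show how an OAK adapter is wired into the annotator. A sketch of that wiring; the oaklib and schema_automator import paths, and whatever enrichment method is called afterwards, are assumptions not visible in this diff:

    from oaklib.selector import get_implementation_from_shorthand  # assumed import path
    from schema_automator.annotators.schema_annotator import SchemaAnnotator  # assumed import path

    # Build an ontology adapter from a shorthand selector such as "so.obo" or "bioportal:ncbitaxon".
    impl = get_implementation_from_shorthand("so.obo")
    annr = SchemaAnnotator(impl)
    # The subsequent enrich/annotate call on annr is not shown in this hunk.
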
@@ -630,7 +630,7 @@ def enrich_using_llm(schema: str, model: str, output: str, **args):

    Example:

-        pip install schema-automator[llm]
+        ``pip install schema-automator[llm]``

    """
    logging.info(f"Enriching: {schema}")