diff --git a/.dockerignore b/.dockerignore index c712142f..96c8053c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,6 @@ # Git -#.git +.git +.github .gitignore # Logs log/* @@ -8,3 +9,9 @@ tmp/* # Editor temp files *.swp *.swo +coverage +create_permissions.log +# Ignore generated test data +test/data/dictionary.txt +test/data/ontology_files/repo/**/* +test/data/tmp/* diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml new file mode 100644 index 00000000..6105c1d8 --- /dev/null +++ b/.github/workflows/docker-image.yml @@ -0,0 +1,42 @@ +name: Docker Image CI + +on: + release: + types: [published] + +jobs: + push_to_registry: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: bioportal/ncbo_cron + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + platforms: linux/amd64,linux/arm64 + build-args: | + RUBY_VERSION=2.7 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/ruby-unit-tests.yml b/.github/workflows/ruby-unit-tests.yml index de50c404..8a420ba1 100644 --- a/.github/workflows/ruby-unit-tests.yml +++ b/.github/workflows/ruby-unit-tests.yml @@ -7,18 +7,17 @@ on: jobs: test: strategy: + fail-fast: false matrix: - backend: ['ruby'] # ruby runs tests with 4store backend and ruby-agraph runs with AllegroGraph backend + backend: ['ncbo_cron'] # ruby runs tests with 4store backend and ruby-agraph runs with AllegroGraph backend runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: copy config.rb file from template run: cp config/config.rb.sample config/config.rb - name: Build docker-compose - working-directory: ./test run: docker-compose build - name: Run unit tests - working-directory: ./test run: | ci_env=`bash <(curl -s https://codecov.io/env)` docker-compose run $ci_env -e CI --rm ${{ matrix.backend }} bundle exec rake test TESTOPTS='-v' diff --git a/.gitignore b/.gitignore index c98b8d52..3fdd0c6d 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,8 @@ config/config.rb config/appliance.rb config/config_*.rb config/*.p12 +config/*.json +data/ projectFilesBackup/ .ruby-version repo* diff --git a/Dockerfile b/Dockerfile index cd191621..73e1379c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,10 +14,16 @@ COPY Gemfile* *.gemspec /srv/ontoportal/ncbo_cron/ WORKDIR /srv/ontoportal/ncbo_cron +# set rubygem and bundler to the last version supported by ruby 2.7 +# remove version after ruby v3 upgrade +RUN gem update --system '3.4.22' +RUN gem install bundler -v '2.4.22' RUN gem update --system RUN gem install bundler ENV BUNDLE_PATH=/srv/ontoportal/bundle RUN bundle install COPY . 
/srv/ontoportal/ncbo_cron +RUN cp /srv/ontoportal/ncbo_cron/config/config.rb.sample /srv/ontoportal/ncbo_cron/config/config.rb + CMD ["/bin/bash"] diff --git a/Gemfile b/Gemfile index ebccbac6..54646cf4 100644 --- a/Gemfile +++ b/Gemfile @@ -2,13 +2,17 @@ source 'https://rubygems.org' gemspec -gem 'faraday', '~> 1.9' gem 'ffi' + +# This is needed temporarily to pull the Google Universal Analytics (UA) +# data and store it in a file. See (bin/import_google_ua_analytics_data) +# The ability to pull this data from Google will cease on July 1, 2024 gem "google-apis-analytics_v3" + +gem 'google-analytics-data' gem 'mail', '2.6.6' -gem 'minitest', '< 5.0' gem 'multi_json' -gem 'oj', '~> 2.0' +gem 'oj', '~> 3.0' gem 'parseconfig' gem 'pony' gem 'pry' @@ -21,13 +25,14 @@ gem 'request_store' # Monitoring gem 'cube-ruby', require: 'cube' -gem 'goo', github: 'ontoportal-lirmm/goo', branch: 'development' +gem 'goo', github: 'ontoportal-lirmm/goo', branch: 'master' gem 'sparql-client', github: 'ontoportal-lirmm/sparql-client', branch: 'master' -gem 'ontologies_linked_data', github: 'ontoportal-lirmm/ontologies_linked_data', branch: 'development' +gem 'ontologies_linked_data', github: 'ontoportal-lirmm/ontologies_linked_data', branch: 'master' gem 'ncbo_annotator', github: 'ontoportal-lirmm/ncbo_annotator', branch: 'master' # Testing group :test do gem 'email_spec' + gem 'minitest', '< 5.0' gem 'simplecov' gem 'simplecov-cobertura' # for codecov.io gem 'test-unit-minitest' diff --git a/Gemfile.lock b/Gemfile.lock index 65de9837..d752fe7d 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,7 +1,7 @@ GIT remote: https://github.com/ontoportal-lirmm/goo.git - revision: 9aa0ccacc9d76bff096218e6d48edc5b0bffd54f - branch: development + revision: 03da25b671d2ffa515b5dce51c6bd35980ae60c7 + branch: master specs: goo (0.0.2) addressable (~> 2.8) @@ -26,8 +26,8 @@ GIT GIT remote: https://github.com/ontoportal-lirmm/ontologies_linked_data.git - revision: 1cfaf4482b7bf8c9001f0ff309f2a0ff06f683b5 - branch: development + revision: e98b884999e5ce917a8be5fdc37f7b4797a1559e + branch: master specs: ontologies_linked_data (0.0.1) activesupport @@ -60,7 +60,7 @@ PATH ncbo_cron (0.0.1) dante goo - google-apis-analytics_v3 + google-analytics-data mlanett-redis-lock multi_json ncbo_annotator @@ -76,7 +76,9 @@ GEM multi_json (~> 1.0) addressable (2.8.6) public_suffix (>= 2.0.2, < 6.0) + base64 (0.2.0) bcrypt (3.1.20) + bigdecimal (3.1.5) binding_of_caller (1.0.0) debug_inspector (>= 0.0.1) builder (3.2.4) @@ -85,7 +87,7 @@ GEM connection_pool (2.4.1) cube-ruby (0.0.3) dante (0.2.0) - debug_inspector (1.1.0) + debug_inspector (1.2.0) declarative (0.0.20) docile (1.4.0) domain_name (0.6.20231109) @@ -93,30 +95,28 @@ GEM htmlentities (~> 4.3.3) launchy (~> 2.1) mail (~> 2.6) - faraday (1.10.3) - faraday-em_http (~> 1.0) - faraday-em_synchrony (~> 1.0) - faraday-excon (~> 1.1) - faraday-httpclient (~> 1.0) - faraday-multipart (~> 1.0) - faraday-net_http (~> 1.0) - faraday-net_http_persistent (~> 1.0) - faraday-patron (~> 1.0) - faraday-rack (~> 1.0) - faraday-retry (~> 1.0) + faraday (2.8.1) + base64 + faraday-net_http (>= 2.0, < 3.1) ruby2_keywords (>= 0.0.4) - faraday-em_http (1.0.0) - faraday-em_synchrony (1.0.0) - faraday-excon (1.1.0) - faraday-httpclient (1.0.1) - faraday-multipart (1.0.4) - multipart-post (~> 2) - faraday-net_http (1.0.1) - faraday-net_http_persistent (1.2.0) - faraday-patron (1.0.0) - faraday-rack (1.0.0) - faraday-retry (1.0.3) + faraday-net_http (3.0.2) + faraday-retry (2.2.0) + faraday (~> 2.0) ffi 
(1.16.3) + gapic-common (0.21.1) + faraday (>= 1.9, < 3.a) + faraday-retry (>= 1.0, < 3.a) + google-protobuf (~> 3.18) + googleapis-common-protos (>= 1.4.0, < 2.a) + googleapis-common-protos-types (>= 1.11.0, < 2.a) + googleauth (~> 1.9) + grpc (~> 1.59) + google-analytics-data (0.4.0) + google-analytics-data-v1beta (>= 0.7, < 2.a) + google-cloud-core (~> 1.6) + google-analytics-data-v1beta (0.10.0) + gapic-common (>= 0.20.0, < 2.a) + google-cloud-errors (~> 1.0) google-apis-analytics_v3 (0.13.0) google-apis-core (>= 0.11.0, < 2.a) google-apis-core (0.11.2) @@ -128,15 +128,29 @@ GEM retriable (>= 2.0, < 4.a) rexml webrick - google-cloud-env (2.0.1) + google-cloud-core (1.6.1) + google-cloud-env (>= 1.0, < 3.a) + google-cloud-errors (~> 1.0) + google-cloud-env (2.1.0) faraday (>= 1.0, < 3.a) - googleauth (1.9.0) + google-cloud-errors (1.3.1) + google-protobuf (3.25.1-x86_64-darwin) + googleapis-common-protos (1.4.0) + google-protobuf (~> 3.14) + googleapis-common-protos-types (~> 1.2) + grpc (~> 1.27) + googleapis-common-protos-types (1.11.0) + google-protobuf (~> 3.18) + googleauth (1.9.1) faraday (>= 1.0, < 3.a) - google-cloud-env (~> 2.0, >= 2.0.1) + google-cloud-env (~> 2.1) jwt (>= 1.4, < 3.0) multi_json (~> 1.11) os (>= 0.9, < 2.0) signet (>= 0.16, < 2.a) + grpc (1.60.0-x86_64-darwin) + google-protobuf (~> 3.25) + googleapis-common-protos-types (~> 1.0) htmlentities (4.3.4) http-accept (1.7.0) http-cookie (1.0.5) @@ -164,10 +178,10 @@ GEM mlanett-redis-lock (0.2.7) redis multi_json (1.15.0) - multipart-post (2.3.0) net-http-persistent (2.9.4) netrc (0.11.0) - oj (2.18.5) + oj (3.16.3) + bigdecimal (>= 3.0) omni_logger (0.1.4) logger os (1.1.4) @@ -186,7 +200,7 @@ GEM addressable (>= 2.2) redis (5.0.8) redis-client (>= 0.17.0) - redis-client (0.19.0) + redis-client (0.19.1) connection_pool representable (3.2.0) declarative (< 0.1.0) @@ -238,22 +252,21 @@ GEM PLATFORMS x86_64-darwin-23 - x86_64-linux DEPENDENCIES binding_of_caller (~> 1.0) cube-ruby email_spec - faraday (~> 1.9) ffi goo! + google-analytics-data google-apis-analytics_v3 mail (= 2.6.6) minitest (< 5.0) multi_json ncbo_annotator! ncbo_cron! - oj (~> 2.0) + oj (~> 3.0) ontologies_linked_data! parseconfig pony diff --git a/bin/import_google_ua_analytics_data b/bin/import_google_ua_analytics_data new file mode 100755 index 00000000..eadfd936 --- /dev/null +++ b/bin/import_google_ua_analytics_data @@ -0,0 +1,235 @@ +#!/usr/bin/env ruby +require 'logger' +require 'optparse' +require 'google/apis/analytics_v3' +require 'google/api_client/auth/key_utils' +require_relative '../lib/ncbo_cron/analytics/object_analytics_job' + +module NcboCron + module Models + + class OntologyVisitsAnalytics + def fetch_ua_object_analytics(logger, ua_conn) + @logger = logger + @ua_conn = ua_conn + aggregated_results = Hash.new + start_year = @start_date.year || 2013 + filter_str = (@analytics_filter.nil? || @analytics_filter.empty?) ? 
"" : ";#{@analytics_filter}" + + @ont_acronyms.each do |acronym| + max_results = 10000 + start_index = 1 + loop do + results = @ua_conn.run_request( + metrics: ['pageviews'], + dimensions: %w[pagePath year month], + filters: [['pagePath', "~^(\\/ontologies\\/#{acronym})(\\/?\\?{0}|\\/?\\?{1}.*)$#{filter_str}"]], + start_index: start_index, + max_results: max_results, + dates_ranges: [@start_date, Date.today.to_s], + sort: %w[year month] + ) + results.rows ||= [] + start_index += max_results + num_results = results.rows.length + @logger.info "Acronym: #{acronym}, Results: #{num_results}, Start Index: #{start_index}" + aggregated_results[acronym] = Hash.new unless aggregated_results.has_key?(acronym) + aggregate_results(aggregated_results[acronym], results.rows) + + if num_results < max_results + # fill up non existent years + (start_year..Date.today.year).each do |y| + aggregated_results[acronym] = Hash.new if aggregated_results[acronym].nil? + aggregated_results[acronym][y.to_s] = Hash.new unless aggregated_results[acronym].has_key?(y.to_s) + end + # fill up non existent months with zeros + (1..12).each { |n| aggregated_results[acronym].values.each { |v| v[n.to_s] = 0 unless v.has_key?(n.to_s) } } + break + end + end + end + sort_ga_data(aggregated_results) + end + end + + class UsersVisitsAnalytics + def fetch_ua_object_analytics(logger, ua_conn) + + aggregated_results = Hash.new + start_year = @start_date.year || 2013 + + max_results = 10000 + start_index = 1 + loop do + results = ua_conn.run_request( + metrics: ['newUsers'], + dimensions: %w[year month], + filters: [], + start_index: start_index, + max_results: max_results, + dates_ranges: [@start_date.to_s, Date.today.to_s], + sort: %w[year month] + ) + results.rows ||= [] + start_index += max_results + num_results = results.rows.length + logger.info "Results: #{num_results}, Start Index: #{start_index}" + + aggregate_results(aggregated_results, results.rows.map { |row| [-1] + row }) + + if num_results < max_results + # fill up non existent years + (start_year..Date.today.year).each do |y| + aggregated_results = Hash.new if aggregated_results.nil? + aggregated_results[y.to_s] = Hash.new unless aggregated_results.has_key?(y.to_s) + end + # fill up non existent months with zeros + (1..12).each { |n| aggregated_results.values.each { |v| v[n.to_s] = 0 unless v.has_key?(n.to_s) } } + break + end + end + { "all_users" => aggregated_results } + end + end + end + module GoogleAnalyticsUAMigrator + # Old version of Google Analytics connector + class GoogleAnalyticsUAConnector + def initialize(app_id:, app_name:, app_version:, analytics_key_file:, app_user:, start_date:, analytics_filter:) + @app_id = app_id + @app_name = app_name + @app_version = app_version + @analytics_key_file = analytics_key_file + @app_user = app_user + @generated_file_path = NcboCron.settings.analytics_path_to_ga_data_file + @start_date = start_date + @analytics_filter = analytics_filter + @ga_client = authenticate_google + end + + def run_request(metrics:, dimensions:, filters:, start_index:, max_results:, dates_ranges:, sort:) + @ga_client.get_ga_data( + ids = @app_id, + start_date = dates_ranges.first, + end_date = dates_ranges.last, + metrics = metrics.map { |m| "ga:#{m}" }.join(','), + { + dimensions: dimensions.map { |d| "ga:#{d}" }.join(','), + filters: filters.empty? ? 
nil : filters.map { |f, v| "ga:#{f}=#{v}" }.join(','), + start_index: start_index, + max_results: max_results, + sort: sort.map { |d| "ga:#{d}" }.join(',') + } + ) + end + + private + + def authenticate_google + Google::Apis::ClientOptions.default.application_name = @app_name + Google::Apis::ClientOptions.default.application_version = @app_version + # enable google api call retries in order to + # minigate analytics processing failure due to occasional google api timeouts and other outages + Google::Apis::RequestOptions.default.retries = 5 + # uncoment to enable logging for debugging purposes + # Google::Apis.logger.level = Logger::DEBUG + # Google::Apis.logger = @logger + client = Google::Apis::AnalyticsV3::AnalyticsService.new + key = Google::APIClient::KeyUtils::load_from_pkcs12(@analytics_key_file, 'notasecret') + client.authorization = Signet::OAuth2::Client.new( + :token_credential_uri => 'https://accounts.google.com/o/oauth2/token', + :audience => 'https://accounts.google.com/o/oauth2/token', + :scope => 'https://www.googleapis.com/auth/analytics.readonly', + :issuer => @app_user, + :signing_key => key + ).tap { |auth| auth.fetch_access_token! } + client + end + end + + def self.run(logger, options) + @start_date = options[:start_date] + @ua_conn = GoogleAnalyticsUAConnector.new(options) + + logger.info "Fetching UA analytics for all ontologies from #{@start_date} to today..." + save = {} + analytics_to_migrate = [NcboCron::Models::OntologyVisitsAnalytics, + NcboCron::Models::UsersVisitsAnalytics] + analytics_to_migrate.each do |analytic_object| + analytic_object = analytic_object.new(start_date: @start_date) + ua_data = analytic_object.fetch_ua_object_analytics(logger, @ua_conn) + save[analytic_object.redis_field] = ua_data + end + new_ga_start_date = NcboCron::Models::ObjectAnalytics::GA4_START_DATE + NcboCron::Models::ObjectAnalyticsJob.new(logger).send(:save_data_in_file, save, new_ga_start_date) + logger.info "Completed Universal Analytics pull..." 
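+      # the UA import is a one-time migration (see the Gemfile note about the UA shutdown on July 1, 2024),
+      # so close the file-backed log handle explicitly before the script exits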
+ logger.close + end + end +end +require 'bundler/setup' +require_relative '../lib/ncbo_cron' +require_relative '../config/config' + + +# # Google Analytics UA config +options = { + app_id: nil, + app_name: nil, + app_version: nil, + analytics_key_file: nil, + app_user: nil, + start_date: nil, + analytics_filter: nil, + logfile: nil +} + +help_text = < e msg = "Failed rebuilding ontology analytics repository with exception: #{e.class}: #{e.message}\n#{e.backtrace.join("\n")}" logger.error(msg) diff --git a/bin/ncbo_ontology_annotate_generate_cache b/bin/ncbo_ontology_annotate_generate_cache index b030bafb..b341e5af 100755 --- a/bin/ncbo_ontology_annotate_generate_cache +++ b/bin/ncbo_ontology_annotate_generate_cache @@ -49,7 +49,7 @@ opt_parser = OptionParser.new do |opts| options[:generate_dictionary] = true end - options[:logfile] = "logs/annotator_cache.log" + options[:logfile] = STDOUT opts.on('-l', '--logfile FILE', "Write log to FILE (default is 'logs/annotator_cache.log').") do |filename| options[:logfile] = filename end diff --git a/bin/ncbo_ontology_archive_old_submissions b/bin/ncbo_ontology_archive_old_submissions index 7a8e5950..34fe073c 100755 --- a/bin/ncbo_ontology_archive_old_submissions +++ b/bin/ncbo_ontology_archive_old_submissions @@ -11,52 +11,130 @@ require_relative '../lib/ncbo_cron' config_exists = File.exist?(File.expand_path('../../config/config.rb', __FILE__)) abort("Please create a config/config.rb file using the config/config.rb.sample as a template") unless config_exists require_relative '../config/config' - require 'optparse' -options = {} -options[:force_archiving] = false +options = { delete: false , force_archiving: false} opt_parser = OptionParser.new do |opts| - options[:ontology] = false + # Set a banner, displayed at the top of the help screen. + opts.banner = "Usage: #{File.basename(__FILE__)} [options]" + opts.on('-f', '--force-re-archiving', 'Force to re-archive already archived submissions.') do options[:force_archiving] = true end + options[:logfile] = STDOUT + opts.on( '-l', '--logfile FILE', "Write log to FILE (default is STDOUT)" ) do |filename| + options[:logfile] = filename + end + + # Delete submission if it contains bad data + opts.on( '-d', '--delete', "Delete submissions that contain bad data" ) do + options[:delete] = true + end + # Display the help screen, all programs are assumed to have this option. - opts.on('-h', '--help', 'Display this screen') do + opts.on( '-h', '--help', 'Display this screen' ) do puts opts exit end end opt_parser.parse! - -logfile = 'archive_old_submissions.log' -if File.file?(logfile); - File.delete(logfile); -end +logfile = options[:logfile] +if File.file?(logfile); File.delete(logfile); end logger = Logger.new(logfile) -options = { process_rdf: false, index_search: false, index_commit: false, - run_metrics: false, reasoning: false, archive: true } +process_actions = { process_rdf: false, generate_labels: false, index_search: false, index_commit: false, + process_annotator: false, diff: false, run_metrics: false, archive: true } onts = LinkedData::Models::Ontology.all onts.each { |ont| ont.bring(:acronym, :submissions) } onts.sort! { |a, b| a.acronym <=> b.acronym } +bad_submissions = {} force_archiving = options[:force_archiving] onts.each do |ont| latest_sub = ont.latest_submission + unless latest_sub.nil? 
id = latest_sub.submissionId subs = ont.submissions - old_subs = subs.reject { |sub| sub.submissionId >= id } + + old_subs = subs.reject { |sub| + begin + sub.submissionId >= id + rescue => e + msg = "Invalid submission ID detected (String instead of Integer): #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + + if options[:delete] + sub.delete if options[:delete] + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to invalid Submission ID" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Invalid Submission ID" + true + end + } old_subs.sort! { |a, b| a.submissionId <=> b.submissionId } old_subs.each do |sub| - if !sub.archived? || force_archiving - msg = "#{ont.acronym}: archive old submission with ID #{sub.submissionId}." + unless sub.archived? || force_archiving + msg = "#{ont.acronym}: found un-archived old submission with ID #{sub.submissionId}." puts msg logger.info msg - NcboCron::Models::OntologySubmissionParser.new.process_submission(logger, sub.id.to_s, options) + + begin + NcboCron::Models::OntologySubmissionParser.new.process_submission(logger, sub.id.to_s, process_actions) + rescue => e + if e.class == Goo::Base::NotValidException + if sub.valid? + msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Submission passes valid check but cannot be saved" + else + msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId}:\n#{JSON.pretty_generate(sub.errors)}" + puts msg + logger.error(msg) + + if options[:delete] + sub.delete if options[:delete] + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to invalid data" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "Submission is not valid to be saved" + end + else + msg = "Error archiving submission #{ont.acronym}/#{sub.submissionId} - #{e.class}:\n#{e.backtrace.join("\n")}" + puts msg + logger.error(msg) + + if options[:delete] && (e.class == Net::HTTPBadResponse || e.class == Errno::ECONNREFUSED) + sub.delete + msg = "Deleted submission #{ont.acronym}/#{sub.submissionId} due to a non-working pull URL" + puts msg + logger.error(msg) + end + bad_submissions["#{ont.acronym}/#{sub.submissionId}"] = "#{e.class} - Runtime error" + end + end end end end end + +puts + +if bad_submissions.empty? 
+ msg = "No errored submissions found" + puts msg + logger.info(msg) +else + msg = JSON.pretty_generate(bad_submissions) + puts msg + logger.error(msg) + msg = "Number of errored submissions: #{bad_submissions.length}" + puts msg + logger.error(msg) +end \ No newline at end of file diff --git a/bin/ncbo_ontology_import b/bin/ncbo_ontology_import index 41ef5b7e..73fa7fac 100755 --- a/bin/ncbo_ontology_import +++ b/bin/ncbo_ontology_import @@ -20,27 +20,27 @@ require 'net/http' require 'optparse' ontologies_acronyms = '' ontology_source = '' -source_api = '' +source_apikey = '' username = '' opt_parser = OptionParser.new do |opts| opts.banner = 'Usage: ncbo_ontology_import [options]' - opts.on('-o', '--ontology ACRONYM', 'Ontologies acronyms which we want to import (separated by comma)') do |acronym| + opts.on('-o', '--ontologies ACRONYM1,ACRONYM2', 'Comma-separated list of ontologies to import') do |acronym| ontologies_acronyms = acronym end - opts.on('--from url', 'The ontoportal api url source of the ontology') do |url| + opts.on('--from URL', 'The ontoportal api url source of the ontology') do |url| ontology_source = url.to_s end - opts.on('--from-api api', 'An apikey to acces the ontoportal api') do |api| - source_api = api.to_s + opts.on('--from-apikey APIKEY', 'An apikey to acces the ontoportal api') do |apikey| + source_apikey = apikey.to_s end - opts.on('--admin-user username', 'The target admin user that will submit the ontology') do |user| + opts.on('--admin-user USERNAME', 'The target admin user that will submit the ontology') do |user| username = user.to_s end # Display the help screen, all programs are assumed to have this option. - opts.on( '-h', '--help', 'Display this screen') do + opts.on('-h', '--help', 'Display this screen') do puts opts exit end @@ -48,9 +48,8 @@ end opt_parser.parse! # URL of the API and APIKEY of the Ontoportal we want to import data FROM -SOURCE_API = ontology_source -SOURCE_APIKEY = source_api - +SOURCE_API = ontology_source +SOURCE_APIKEY = source_apikey # The username of the user that will have the administration rights on the ontology on the target portal TARGETED_PORTAL_USER = username @@ -58,17 +57,15 @@ TARGETED_PORTAL_USER = username # The list of acronyms of ontologies to import ONTOLOGIES_TO_IMPORT = ontologies_acronyms.split(',') || [] - def get_user(username) user = LinkedData::Models::User.find(username).first raise "The user #{username} does not exist" if user.nil? + user.bring_remaining end - # A function to create a new ontology (if already Acronym already existing on the portal it will return HTTPConflict) def create_ontology(ont_info) - new_ontology = LinkedData::Models::Ontology.new new_ontology.acronym = ont_info['acronym'] @@ -112,23 +109,30 @@ def upload_submission(sub_info, ontology) # Build the json body # hasOntologyLanguage options: OWL, UMLS, SKOS, OBO # status: alpha, beta, production, retired - attr_to_reject = %w[id submissionStatus hasOntologyLanguage metrics ontology @id @type contact] - to_copy = sub_info.select do |k,v| + attr_to_reject = %w[id submissionStatus hasOntologyLanguage metrics ontology @id @type contact uploadFilePath diffFilePath] + to_copy = sub_info.select do |k, v| !v.nil? && !v.is_a?(Hash) && !v.to_s.empty? 
&& !attr_to_reject.include?(k) end to_copy["ontology"] = ontology - to_copy["contact"] = [LinkedData::Models::Contact.where(email: USER.email).first] - to_copy["hasOntologyLanguage"] = LinkedData::Models::OntologyFormat.where(acronym: sub_info["hasOntologyLanguage"]).first + + contact = LinkedData::Models::Contact.where(email: USER.email).first + unless contact + contact = LinkedData::Models::Contact.new(name: USER.username, email: USER.email).save + puts "created a new contact; name: #{USER.username}, email: #{USER.email}" + end + + to_copy["contact"] = [contact] + to_copy["hasOntologyLanguage"] = LinkedData::Models::OntologyFormat.where(acronym: sub_info["hasOntologyLanguage"]).first to_copy.each do |key, value| attribute_settings = new_submission.class.attribute_settings(key.to_sym) if attribute_settings - if attribute_settings[:enforce]&.include?(:date_time) + if attribute_settings[:enforce]&.include?(:date_time) value = DateTime.parse(value) elsif attribute_settings[:enforce]&.include?(:uri) && attribute_settings[:enforce]&.include?(:list) value = value.map { |v| RDF::IRI.new(v) } - elsif attribute_settings[:enforce]&.include?(:uri) + elsif attribute_settings[:enforce]&.include?(:uri) value = RDF::IRI.new(value) end end @@ -139,12 +143,11 @@ def upload_submission(sub_info, ontology) new_submission end - USER = get_user username -#get apikey for admin user +# get apikey for admin user TARGET_APIKEY = USER.apikey -SOURCE_APIKEY == '' && abort('--from-api has to be set') +SOURCE_APIKEY == '' && abort('--from-apikey has to be set') SOURCE_API == '' && abort('--from has to be set') def result_log(ressource, errors) @@ -158,10 +161,11 @@ end # Go through all ontologies acronym and get their latest_submission informations ONTOLOGIES_TO_IMPORT.each do |ont| sub_info = JSON.parse(Net::HTTP.get(URI.parse("#{SOURCE_API}/ontologies/#{ont}/latest_submission?apikey=#{SOURCE_APIKEY}&display=all"))) - puts "Import #{ont} " , + puts "Import #{ont} ", "From #{SOURCE_API}" # if the ontology is already created then it will return HTTPConflict, no consequences raise "The ontology #{ont} does not exist" if sub_info['ontology'].nil? + new_ontology = create_ontology(sub_info['ontology']) errors = nil if new_ontology.valid? @@ -174,6 +178,7 @@ ONTOLOGIES_TO_IMPORT.each do |ont| new_ontology ||= LinkedData::Models::Ontology.where(acronym: ont).first new_submission = upload_submission(sub_info, new_ontology) + if new_submission.valid? new_submission.save errors = nil @@ -182,6 +187,3 @@ ONTOLOGIES_TO_IMPORT.each do |ont| end result_log(sub_info["id"], errors) end - - - diff --git a/bin/ncbo_ontology_process b/bin/ncbo_ontology_process index 05a1169b..e9b68182 100755 --- a/bin/ncbo_ontology_process +++ b/bin/ncbo_ontology_process @@ -31,9 +31,14 @@ opt_parser = OptionParser.new do |opts| end options[:tasks] = NcboCron::Models::OntologySubmissionParser::ACTIONS - opts.on('-t', '--tasks process_rdf,index_search,run_metrics', "Optional comma-separated list of processing tasks to perform. Default: #{NcboCron::Models::OntologySubmissionParser::ACTIONS.keys.join(',')}") do |tasks| - t = tasks.split(",").map {|t| t.strip.sub(/^:/, '').to_sym} - options[:tasks].each {|k, _| options[:tasks][k] = false unless t.include?(k)} + opts.on('-t', '--tasks process_rdf,generate_labels=false,index_search,run_metrics', "Optional comma-separated list of processing tasks to perform (or exclude). 
Default: #{NcboCron::Models::OntologySubmissionParser::ACTIONS.keys.join(',')}") do |tasks| + tasks_obj = {} + tasks.split(',').each { |t| + t_arr = t.gsub(/\s+/, '').gsub(/^:/, '').split('=') + tasks_obj[t_arr[0].to_sym] = (t_arr.length <= 1 || t_arr[1].downcase === 'true') + } + tasks_obj[:generate_labels] = true if tasks_obj[:process_rdf] && !tasks_obj.has_key?(:generate_labels) + options[:tasks].each {|k, _| options[:tasks][k] = false unless tasks_obj[k]} end options[:logfile] = STDOUT diff --git a/config/config.rb.sample b/config/config.rb.sample index 0729a4b0..10cf7e90 100644 --- a/config/config.rb.sample +++ b/config/config.rb.sample @@ -1,33 +1,49 @@ # This file is designed to be used for unit testing with docker-compose -# -GOO_PATH_QUERY = ENV.include?("GOO_PATH_QUERY") ? ENV["GOO_PATH_QUERY"] : "/sparql/" -GOO_PATH_DATA = ENV.include?("GOO_PATH_DATA") ? ENV["GOO_PATH_DATA"] : "/data/" -GOO_PATH_UPDATE = ENV.include?("GOO_PATH_UPDATE") ? ENV["GOO_PATH_UPDATE"] : "/update/" -GOO_BACKEND_NAME = ENV.include?("GOO_BACKEND_NAME") ? ENV["GOO_BACKEND_NAME"] : "localhost" -GOO_PORT = ENV.include?("GOO_PORT") ? ENV["GOO_PORT"] : 9000 -GOO_HOST = ENV.include?("GOO_HOST") ? ENV["GOO_HOST"] : "localhost" -REDIS_HOST = ENV.include?("REDIS_HOST") ? ENV["REDIS_HOST"] : "localhost" -REDIS_PORT = ENV.include?("REDIS_PORT") ? ENV["REDIS_PORT"] : 6379 -MGREP_HOST = ENV.include?("MGREP_HOST") ? ENV["MGREP_HOST"] : "localhost" -MGREP_PORT = ENV.include?("MGREP_PORT") ? ENV["MGREP_PORT"] : 55555 -SOLR_TERM_SEARCH_URL = ENV.include?("SOLR_TERM_SEARCH_URL") ? ENV["SOLR_TERM_SEARCH_URL"] : "http://localhost:8983/solr/term_search_core1" -SOLR_PROP_SEARCH_URL = ENV.include?("SOLR_PROP_SEARCH_URL") ? ENV["SOLR_PROP_SEARCH_URL"] : "http://localhost:8983/solr/prop_search_core1" + +GOO_BACKEND_NAME = ENV.include?("GOO_BACKEND_NAME") ? ENV["GOO_BACKEND_NAME"] : "4store" +GOO_HOST = ENV.include?("GOO_HOST") ? ENV["GOO_HOST"] : "localhost" +GOO_PATH_DATA = ENV.include?("GOO_PATH_DATA") ? ENV["GOO_PATH_DATA"] : "/data/" +GOO_PATH_QUERY = ENV.include?("GOO_PATH_QUERY") ? ENV["GOO_PATH_QUERY"] : "/sparql/" +GOO_PATH_UPDATE = ENV.include?("GOO_PATH_UPDATE") ? ENV["GOO_PATH_UPDATE"] : "/update/" +GOO_PORT = ENV.include?("GOO_PORT") ? ENV["GOO_PORT"] : 9000 +MGREP_HOST = ENV.include?("MGREP_HOST") ? ENV["MGREP_HOST"] : "localhost" +MGREP_PORT = ENV.include?("MGREP_PORT") ? ENV["MGREP_PORT"] : 55555 +MGREP_DICT_PATH = ENV.include?("MGREP_DICT_PATH") ? ENV["MGREP_DICT_PATH"] : "./test/data/dictionary.txt" +REDIS_GOO_CACHE_HOST = ENV.include?("REDIS_GOO_CACHE_HOST") ? ENV["REDIS_GOO_CACHE_HOST"] : "localhost" +REDIS_HTTP_CACHE_HOST = ENV.include?("REDIS_HTTP_CACHE_HOST") ? ENV["REDIS_HTTP_CACHE_HOST"] : "localhost" +REDIS_PERSISTENT_HOST = ENV.include?("REDIS_PERSISTENT_HOST") ? ENV["REDIS_PERSISTENT_HOST"] : "localhost" +REDIS_PORT = ENV.include?("REDIS_PORT") ? ENV["REDIS_PORT"] : 6379 +REPORT_PATH = ENV.include?("REPORT_PATH") ? ENV["REPORT_PATH"] : "./test/tmp/ontologies_report.json" +REPOSITORY_FOLDER = ENV.include?("REPOSITORY_FOLDER") ? ENV["REPOSITORY_FOLDER"] : "./test/data/ontology_files/repo" +REST_URL_PREFIX = ENV.include?("REST_URL_PREFIX") ? ENV["REST_URL_PREFIX"] : "http://localhost:9393" +SOLR_PROP_SEARCH_URL = ENV.include?("SOLR_PROP_SEARCH_URL") ? ENV["SOLR_PROP_SEARCH_URL"] : "http://localhost:8983/solr/prop_search_core1" +SOLR_TERM_SEARCH_URL = ENV.include?("SOLR_TERM_SEARCH_URL") ? 
ENV["SOLR_TERM_SEARCH_URL"] : "http://localhost:8983/solr/term_search_core1" LinkedData.config do |config| + config.goo_backend_name = GOO_BACKEND_NAME.to_s config.goo_host = GOO_HOST.to_s config.goo_port = GOO_PORT.to_i - config.goo_backend_name = GOO_BACKEND_NAME.to_s config.goo_path_query = GOO_PATH_QUERY.to_s config.goo_path_data = GOO_PATH_DATA.to_s config.goo_path_update = GOO_PATH_UPDATE.to_s - config.goo_redis_host = REDIS_HOST.to_s + config.goo_redis_host = REDIS_GOO_CACHE_HOST.to_s config.goo_redis_port = REDIS_PORT.to_i - config.http_redis_host = REDIS_HOST.to_s + config.http_redis_host = REDIS_HTTP_CACHE_HOST.to_s config.http_redis_port = REDIS_PORT.to_i - config.ontology_analytics_redis_host = REDIS_HOST.to_s + config.ontology_analytics_redis_host = REDIS_PERSISTENT_HOST.to_s config.ontology_analytics_redis_port = REDIS_PORT.to_i + config.repository_folder = REPOSITORY_FOLDER.to_s config.search_server_url = SOLR_TERM_SEARCH_URL.to_s config.property_search_server_url = SOLR_PROP_SEARCH_URL.to_s +# config.replace_url_prefix = false +# config.rest_url_prefix = REST_URL_PREFIX.to_s + # Used to define other bioportal that can be mapped to + # Example to map to ncbo bioportal : {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""} + # Then create the mapping using the following class in JSON : "http://purl.bioontology.org/ontology/MESH/C585345": "ncbo:MESH" + # Where "ncbo" is the namespace used as key in the interportal_hash + config.interportal_hash = {"ncbo" => {"api" => "http://data.bioontology.org", "ui" => "http://bioportal.bioontology.org", "apikey" => ""}, + "agroportal" => {"api" => "http://data.agroportal.lirmm.fr", "ui" => "http://agroportal.lirmm.fr", "apikey" => ""}} + # Email notifications. 
config.enable_notifications = true config.email_sender = "sender@domain.com" # Default sender for emails @@ -40,15 +56,38 @@ LinkedData.config do |config| end Annotator.config do |config| - config.annotator_redis_host = REDIS_HOST.to_s - config.annotator_redis_port = REDIS_PORT.to_i - config.mgrep_host = MGREP_HOST.to_s - config.mgrep_port = MGREP_PORT.to_i - config.mgrep_dictionary_file = "./test/data/dictionary.txt" + config.mgrep_host ||= "localhost" + config.annotator_redis_host = REDIS_PERSISTENT_HOST.to_s + config.annotator_redis_port = REDIS_PORT.to_i + config.mgrep_host = MGREP_HOST.to_s + config.mgrep_port = MGREP_PORT.to_i + config.mgrep_dictionary_file = MGREP_DICT_PATH.to_s end NcboCron.config do |config| - config.redis_host = REDIS_HOST.to_s + config.redis_host = REDIS_PERSISTENT_HOST.to_s config.redis_port = REDIS_PORT.to_i - config.ontology_report_path = "./test/ontologies_report.json" + # Ontologies Report config + config.ontology_report_path = REPORT_PATH + + # do not deaemonize in docker + config.daemonize = false + + config.search_index_all_url = "http://localhost:8983/solr/term_search_core2" + config.property_search_index_all_url = "http://localhost:8983/solr/prop_search_core2" + + # Google Analytics GA4 config + config.analytics_path_to_key_file = "config/your_analytics_key.json" + config.analytics_property_id = "123456789" + + # path to the file that will hold your Google Analytics data + # this is in addition to storing it in Redis + config.analytics_path_to_ga_data_file = "data/your_ga_data.json" + config.analytics_start_date = '2015-01-01' + # this is a Base64.encode64 encoded personal access token + # you need to run Base64.decode64 on it before using it in your code + # this is a workaround because Github does not allow storing access tokens in a repo + config.git_repo_access_token = "YOUR GITHUB REPO PERSONAL ACCESS TOKEN, encoded using Base64" end + +Goo.use_cache = true diff --git a/dip.yml b/dip.yml new file mode 100644 index 00000000..3bbe4444 --- /dev/null +++ b/dip.yml @@ -0,0 +1,54 @@ +version: '7.1' + +# Define default environment variables to pass +# to Docker Compose +#environment: +# RAILS_ENV: development + +compose: + files: + - docker-compose.yml + # project_name: ncbo_cron + +interaction: + # This command spins up a ncbo_cron container with the required dependencies (solr, 4store, etc), + # and opens a terminal within it. 
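+  # Typically invoked as `dip runner` from the project root.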
+ runner: + description: Open a Bash shell within a ncbo_cron container (with dependencies up) + service: ncbo_cron + command: /bin/bash + + # Run a container without any dependent services + bash: + description: Run an arbitrary script within a container (or open a shell without deps) + service: ncbo_cron + command: /bin/bash + compose_run_options: [ no-deps ] + + # A shortcut to run Bundler commands + bundle: + description: Run Bundler commands within ncbo_cron container (with depencendies up) + service: ncbo_cron + command: bundle + + # A shortcut to run unit tests + test: + description: Run unit tests with 4store triplestore + service: ncbo_cron + command: bundle exec rake test TESTOPTS='-v' + + test-ag: + description: Run unit tests with AllegroGraph triplestore + service: ncbo_cron-agraph + command: bundle exec rake test TESTOPTS='-v' + + 'redis-cli': + description: Run Redis console + service: redis-ut + command: redis-cli -h redis-ut + +#provision: + #- dip compose down --volumes + #- dip compose up -d solr 4store + #- dip bundle install + #- dip bash -c bin/setup diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..9f5d628d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,155 @@ +x-app: &app + build: + context: . + args: + RUBY_VERSION: '2.7' + # Increase the version number in the image tag every time Dockerfile or its arguments is changed + image: ncbo_cron:0.0.2 + environment: &env + BUNDLE_PATH: /srv/ontoportal/bundle + # default bundle config resolves to /usr/local/bundle/config inside of the container + # we are setting it to local app directory if we need to use 'bundle config local' + BUNDLE_APP_CONFIG: /srv/ontoportal/ncbo_cron/.bundle + COVERAGE: 'true' + GOO_REDIS_HOST: redis-ut + REDIS_GOO_CACHE_HOST: redis-ut + REDIS_HTTP_CACHE_HOST: redis-ut + REDIS_PERSISTENT_HOST: redis-ut + REDIS_PORT: 6379 + SOLR_TERM_SEARCH_URL: http://solr-ut:8983/solr/term_search_core1 + SOLR_PROP_SEARCH_URL: http://solr-ut:8983/solr/prop_search_core1 + MGREP_HOST: mgrep-ut + MGREP_PORT: 55556 + stdin_open: true + tty: true + command: "bundle exec rackup -o 0.0.0.0 --port 9393" + volumes: + # bundle volume for hosting gems installed by bundle; it helps in local development with gem udpates + - bundle:/srv/ontoportal/bundle + # ncbo_cron code + - .:/srv/ontoportal/ncbo_cron + # mount directory containing development version of the gems if you need to use 'bundle config local' + #- /Users/alexskr/ontoportal:/Users/alexskr/ontoportal + depends_on: &depends_on + solr-ut: + condition: service_healthy + redis-ut: + condition: service_healthy + mgrep-ut: + condition: service_healthy + +services: + ncbo_cron: + <<: *app + environment: + <<: *env + GOO_BACKEND_NAME: 4store + GOO_PORT: 9000 + GOO_HOST: 4store-ut + GOO_PATH_QUERY: /sparql/ + GOO_PATH_DATA: /data/ + GOO_PATH_UPDATE: /update/ + profiles: + - 4store + depends_on: + <<: *depends_on + 4store-ut: + condition: service_started + + + ncbo_cron-agraph: + <<: *app + environment: + <<: *env + GOO_BACKEND_NAME: ag + GOO_PORT: 10035 + GOO_HOST: agraph-ut + GOO_PATH_QUERY: /repositories/bioportal_test + GOO_PATH_DATA: /repositories/bioportal_test/statements + GOO_PATH_UPDATE: /repositories/bioportal_test/statements + profiles: + - agraph + depends_on: + <<: *depends_on + agraph-ut: + condition: service_healthy + + + redis-ut: + image: redis + ports: + - 6379:6379 + healthcheck: + test: redis-cli ping + interval: 10s + timeout: 3s + retries: 10 + + 4store-ut: + image: bde2020/4store + platform: linux/amd64 + #volume: 
fourstore:/var/lib/4store + command: > + bash -c "4s-backend-setup --segments 4 ontoportal_kb + && 4s-backend ontoportal_kb + && 4s-httpd -D -s-1 -p 9000 ontoportal_kb" + profiles: + - 4store + ports: + - 9000:9000 + + solr-ut: + image: solr:8 + volumes: + - ./test/solr/configsets:/configsets:ro + ports: + - "8983:8983" + command: > + bash -c "precreate-core term_search_core1 /configsets/term_search + && precreate-core prop_search_core1 /configsets/property_search + && solr-foreground" + healthcheck: + test: [ "CMD-SHELL", "curl -sf http://localhost:8983/solr/term_search_core1/admin/ping?wt=json | grep -iq '\"status\":\"OK\"}' || exit 1" ] + start_period: 3s + interval: 10s + timeout: 5s + retries: 5 + + mgrep-ut: + image: ontoportal/mgrep:0.0.2 + platform: linux/amd64 + healthcheck: + test: ["CMD", "nc", "-z", "-v", "localhost", "55556"] + start_period: 3s + interval: 10s + timeout: 5s + retries: 5 + ports: + - 55556:55555 + + agraph-ut: + image: franzinc/agraph:v8.0.0 + platform: linux/amd64 + environment: + - AGRAPH_SUPER_USER=test + - AGRAPH_SUPER_PASSWORD=xyzzy + shm_size: 1g + # ports: + # - 10035:10035 + command: > + bash -c "/agraph/bin/agraph-control --config /agraph/etc/agraph.cfg start + ; agtool repos create bioportal_test + ; agtool users add anonymous + ; agtool users grant anonymous root:bioportal_test:rw + ; tail -f /agraph/data/agraph.log" + healthcheck: + test: ["CMD-SHELL", "agtool storage-report bioportal_test || exit 1"] + start_period: 20s + interval: 60s + timeout: 5s + retries: 3 + profiles: + - agraph + +volumes: + bundle: diff --git a/lib/ncbo_cron.rb b/lib/ncbo_cron.rb index 884e6b33..83d67caa 100644 --- a/lib/ncbo_cron.rb +++ b/lib/ncbo_cron.rb @@ -11,7 +11,7 @@ require_relative 'ncbo_cron/scheduler' require_relative 'ncbo_cron/query_caching' require_relative 'ncbo_cron/ontologies_report' -require_relative 'ncbo_cron/ontology_analytics' +require_relative 'ncbo_cron/analytics/object_analytics_job' require_relative 'ncbo_cron/ontology_rank' require_relative 'ncbo_cron/spam_deletion' require_relative 'ncbo_cron/mapping_counts' diff --git a/lib/ncbo_cron/analytics/object_analytics.rb b/lib/ncbo_cron/analytics/object_analytics.rb new file mode 100644 index 00000000..d4a7f3da --- /dev/null +++ b/lib/ncbo_cron/analytics/object_analytics.rb @@ -0,0 +1,197 @@ +require 'logger' +require 'json' +require 'benchmark' +require 'google/analytics/data' +require 'google/apis/analytics_v3' +require 'google/api_client/auth/key_utils' + +module NcboCron + module Models + + + + class GoogleAnalyticsConnector + + attr_reader :ga_client + + def initialize + @app_id = NcboCron.settings.analytics_property_id + @app_key_file = NcboCron.settings.analytics_path_to_key_file + @ga_client = analytics_data_client + end + + def run_request(metrics:, dimensions:, date_ranges:, order_bys:, offset:, limit:, dimension_filter:) + request = Google::Analytics::Data::V1beta::RunReportRequest.new( + property: "properties/#{@app_id}", + metrics: metrics.map { |m| ga_metric(m) }, + dimension_filter: dimension_filter.empty? ? 
nil : ga_filter(*dimension_filter), + dimensions: dimensions.map { |d| ga_dimension(d) }, + date_ranges: [ga_date_range(*date_ranges)], + order_bys: order_bys.map { |o| ga_order_by(o) }, + offset: offset, + limit: limit + ) + + @ga_client.run_report request + end + + private + + def analytics_data_client + Google::Analytics::Data.analytics_data do |config| + config.credentials = @app_key_file + end + end + + def ga_metric(name) + Google::Analytics::Data::V1beta::Metric.new( + name: name + ) + end + + def ga_date_range(start_date, end_date) + Google::Analytics::Data::V1beta::DateRange.new( + start_date: start_date, + end_date: end_date + ) + end + + def ga_dimension(name) + Google::Analytics::Data::V1beta::Dimension.new( + name: name + ) + end + + def ga_filter(field_name, value) + string_filter = Google::Analytics::Data::V1beta::Filter::StringFilter.new( + match_type: Google::Analytics::Data::V1beta::Filter::StringFilter::MatchType::FULL_REGEXP, + value: value + ) + + filter = Google::Analytics::Data::V1beta::Filter.new( + field_name: field_name, + string_filter: string_filter + ) + Google::Analytics::Data::V1beta::FilterExpression.new(filter: filter) + end + + def ga_order_by(dimension_name, desc = false) + order = Google::Analytics::Data::V1beta::OrderBy::DimensionOrderBy.new( + dimension_name: dimension_name + ) + Google::Analytics::Data::V1beta::OrderBy.new( + desc: desc, + dimension: order + ) + end + + end + + class ObjectAnalytics + GA4_START_DATE = '2023-06-01' + attr_reader :redis_field + + def initialize(redis_field:, start_date:, old_data: {}) + @redis_field = redis_field + @start_date = Date.parse(start_date) rescue Date.parse(NcboCron.settings.analytics_start_date) + @old_data = old_data[@redis_field] || {} + end + + def full_data(logger, ga_conn) + + logger.info "Fetching GA4 analytics for #{@redis_field} from #{@start_date} to today..." + logger.flush + new_ga_data = fetch_object_analytics(logger, ga_conn) + + merge_and_fill_missing_data(new_ga_data, @old_data, logger) + end + + # @param ga_conn GoogleAnalyticsConnector + def fetch_object_analytics(logger, ga_conn) + raise NotImplementedError, "Subclasses must implement this method" + end + + private + + def merge_and_fill_missing_data(new_data, old_data,logger, start_date = @start_date) + if !new_data.empty? + logger.info "Merging old Google Analytics and the new data..." + logger.flush + new_data.keys.each do |acronym| + if old_data.has_key?(acronym) + (start_date.year..Date.today.year).each do |year| + year = year.to_s + if new_data[acronym].has_key?(year) + if old_data[acronym].has_key?(year) + (1..Date.today.month).each do |month| + month = month.to_s + old_data[acronym][year][month] ||= 0 + unless old_data[acronym][year][month].eql?(new_data[acronym][year][month]) + old_data[acronym][year][month] += (new_data[acronym][year][month] || 0) + end + end + else + old_data[acronym][year] = new_data[acronym][year] + end + end + end + else + old_data[acronym]= new_data[acronym] + end + end + # fill missing years and months + logger.info "Filling in missing years data..." 
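+          # fill_missing_data (defined below) pads every key with empty year hashes and zeroed months,
+          # so downstream consumers always see a full calendar range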
+ old_data = fill_missing_data(old_data) + end + + # sort_ga_data(old_data) + old_data + end + + def aggregate_results(aggregated_results, results) + results.each do |row| + + year = row[1].to_i.to_s + month = row[2].to_i.to_s + value = row[3].to_i + aggregated = aggregated_results + # year + if aggregated.has_key?(year) + # month + if aggregated[year].has_key?(month) + aggregated[year][month] += value + else + aggregated[year][month] = value + end + else + aggregated[year] = Hash.new + aggregated[year][month] = value + end + end + end + + def fill_missing_data(ga_data) + # fill up non existent years + start_year = @start_date.year + + ga_data.each do |acronym, _| + (start_year..Date.today.year).each do |y| + ga_data[acronym] = Hash.new if ga_data[acronym].nil? + ga_data[acronym][y.to_s] = Hash.new unless ga_data[acronym].has_key?(y.to_s) + end + # fill up non existent months with zeros + (1..12).each { |n| ga_data[acronym].values.each { |v| v[n.to_s] = 0 if v.is_a?(Hash) && !v.has_key?(n.to_s) } } + end + end + + def sort_ga_data(ga_data) + ga_data.transform_values { |value| + value.transform_values { |val| + val.sort_by { |key, _| key.to_i }.to_h + }.sort_by { |k, _| k.to_i }.to_h + }.sort.to_h + end + + end + end +end diff --git a/lib/ncbo_cron/analytics/object_analytics_job.rb b/lib/ncbo_cron/analytics/object_analytics_job.rb new file mode 100644 index 00000000..8133aeb0 --- /dev/null +++ b/lib/ncbo_cron/analytics/object_analytics_job.rb @@ -0,0 +1,84 @@ +require 'logger' +require 'json' +require 'benchmark' +require 'google/analytics/data' +require 'google/apis/analytics_v3' +require 'google/api_client/auth/key_utils' + +require_relative 'ontology_visits_analytics' +require_relative 'user_visits_analytics' +require_relative 'page_visits_analytics' + +module NcboCron + module Models + class ObjectAnalyticsJob + def initialize(logger) + @redis_host = LinkedData.settings.ontology_analytics_redis_host + @redis_port = LinkedData.settings.ontology_analytics_redis_port + + @data_file = NcboCron.settings.analytics_path_to_ga_data_file + + + + @logger = logger + @logger.info "Authenticating with the Google Analytics Endpoint..." + @ga_conn = GoogleAnalyticsConnector.new + + @analytics_objects = [ + NcboCron::Models::OntologyVisitsAnalytics, + NcboCron::Models::UsersVisitsAnalytics, + NcboCron::Models::PageVisitsAnalytics, + ] + end + + def run + redis = Redis.new(:host => @redis_host, :port => @redis_port) + @logger.info "Starting Google Analytics refresh..." + time = Benchmark.realtime do + @logger.info "Fetching saved analytics data..." + save = {} + @old_data = read_old_data + @analytics_objects.each do |analytic_object| + analytic_object = analytic_object.new(start_date: detect_latest_date, old_data: @old_data) + @logger.info "Start fetching new #{analytic_object.redis_field} data..." + new_data = analytic_object.full_data(@logger, @ga_conn) + save[analytic_object.redis_field] = new_data + redis.set(analytic_object.redis_field, Marshal.dump(new_data)) + @logger.info "Completed fetching #{analytic_object.redis_field} data..." + end + save_data_in_file(save) + end + @logger.info "Completed Google Analytics refresh in #{(time / 60).round(1)} minutes." 
+ @logger.flush + end + + private + def read_old_data + return {} unless File.exists?(@data_file) && !File.zero?(@data_file) + JSON.parse(File.read(@data_file)) + end + + def detect_latest_date + begin + input_date = Date.parse(@old_data['latest_date_save']).prev_month(6) + start_of_month = Date.new(input_date.year, input_date.month, 1) + start_of_month.to_s + rescue + nil + end + + end + + def save_data_in_file(new_data, saved_date = Date.today.to_s, data_file = @data_file) + new_data["latest_date_save"] = saved_date + # Ensure the directory exists before creating the file + FileUtils.mkdir_p(File.dirname(data_file)) + # Open the file with 'w+' mode to create if not exist and write + File.open(data_file, 'w+') do |f| + f.write(new_data.to_json) + end + end + end + + end +end diff --git a/lib/ncbo_cron/analytics/ontology_visits_analytics.rb b/lib/ncbo_cron/analytics/ontology_visits_analytics.rb new file mode 100644 index 00000000..41d6edfd --- /dev/null +++ b/lib/ncbo_cron/analytics/ontology_visits_analytics.rb @@ -0,0 +1,64 @@ +require 'logger' +require 'json' +require 'benchmark' +require_relative 'object_analytics' + +module NcboCron + module Models + + class OntologyVisitsAnalytics < ObjectAnalytics + + ONTOLOGY_ANALYTICS_REDIS_FIELD = 'ontology_analytics' + + def initialize(start_date: , old_data: {}) + super(redis_field: ONTOLOGY_ANALYTICS_REDIS_FIELD, start_date: start_date, old_data: old_data) + @ont_acronyms = LinkedData::Models::Ontology.where.include(:acronym).all.map { |o| o.acronym } + end + + def fetch_object_analytics(logger, ga_conn) + @logger = logger + @ga_conn = ga_conn + + aggregated_results = Hash.new + max_results = 10000 + + @ont_acronyms.each do |acronym| + start_index = 0 + filer_regex = "^(\\/ontologies\\/#{acronym})(\\/?\\?{0}|\\/?\\?{1}.*)$" + + loop do + response = @ga_conn.run_request( + date_ranges: [[@start_date, Date.parse(GA4_START_DATE)].max.to_s, Date.today.to_s], + metrics: ['screenPageViews'], + dimensions: %w[pagePath year month], + order_bys: %w[year month], + dimension_filter: ['pagePath', filer_regex], + offset: start_index, + limit: max_results + ) + + response.rows ||= [] + num_results = response.rows.length + @logger.info "Acronym: #{acronym}, Results: #{num_results}, Start Index: #{start_index}" + @logger.flush + start_index += max_results + results = [] + + response.rows.each do |row| + row_h = row.to_h + year_month_hits = row_h[:dimension_values].map.with_index { + |v, i| i > 0 ? 
v[:value].to_s : row_h[:metric_values][0][:value].to_s + }.rotate(1) + results << ([acronym] + year_month_hits) + end + aggregated_results[acronym] = Hash.new unless aggregated_results.has_key?(acronym) + aggregate_results(aggregated_results[acronym], results) + break if num_results < max_results + end + end + aggregated_results + end + end + + end +end diff --git a/lib/ncbo_cron/analytics/page_visits_analytics.rb b/lib/ncbo_cron/analytics/page_visits_analytics.rb new file mode 100644 index 00000000..b2354c00 --- /dev/null +++ b/lib/ncbo_cron/analytics/page_visits_analytics.rb @@ -0,0 +1,66 @@ +require 'logger' +require 'json' +require 'benchmark' +require_relative 'object_analytics' + +module NcboCron + module Models + class PageVisitsAnalytics < ObjectAnalytics + def initialize(start_date: Date.today.prev_month, old_data: {}) + super(redis_field: 'pages_analytics', start_date: Date.today.prev_month, old_data: { }) + end + + private + + def fetch_object_analytics(logger, ga_conn) + @logger = logger + @ga_conn = ga_conn + + aggregated_results = Hash.new + max_results = 10000 + + start_index = 0 + loop do + response = @ga_conn.run_request( + date_ranges: [[@start_date, Date.parse(GA4_START_DATE)].max.to_s, Date.today.to_s], + metrics: ['screenPageViews'], + dimensions: %w[pagePathPlusQueryString], + order_bys: %w[screenPageViews], + dimension_filter: [], + offset: start_index, + limit: max_results + ) + + response.rows ||= [] + num_results = response.rows.length + @logger.info "Results: #{num_results}, Start Index: #{start_index}" + @logger.flush + start_index += max_results + results = [] + aggregated_results = {} + response.rows.each do |row| + row_h = row.to_h + year_month_hits = row_h[:dimension_values].map{ |x| x[:value] } + [row_h[:metric_values].first[:value]] + results << year_month_hits + page_count = year_month_hits[1].to_i + page_path = year_month_hits[0] + page_path = year_month_hits[0].chop if page_path.end_with?('/') && !page_path.eql?('/') + if page_count >= 10 + old_page_count = aggregated_results[page_path] || 0 + aggregated_results[page_path] = old_page_count + page_count + end + end + + break if num_results < max_results + end + {"all_pages" => aggregated_results } + end + + # we don't the missing data in this case + def fill_missing_data(ga_data) + ga_data + end + end + end +end + diff --git a/lib/ncbo_cron/analytics/user_visits_analytics.rb b/lib/ncbo_cron/analytics/user_visits_analytics.rb new file mode 100644 index 00000000..9b2b6631 --- /dev/null +++ b/lib/ncbo_cron/analytics/user_visits_analytics.rb @@ -0,0 +1,58 @@ +require 'logger' +require 'json' +require 'benchmark' +require_relative 'object_analytics' + +module NcboCron + module Models + class UsersVisitsAnalytics < ObjectAnalytics + def initialize(start_date: , old_data: {}) + super(redis_field: 'user_analytics', start_date: start_date, old_data: old_data) + end + + private + + def fetch_object_analytics(logger, ga_conn) + @logger = logger + @ga_conn = ga_conn + + aggregated_results = Hash.new + max_results = 10000 + + + + start_index = 0 + loop do + response = @ga_conn.run_request( + date_ranges: [[@start_date, Date.parse(GA4_START_DATE)].max.to_s, Date.today.to_s], + metrics: ['newUsers'], + dimensions: %w[year month], + order_bys: %w[year month], + dimension_filter: [], + offset: start_index, + limit: max_results + ) + + response.rows ||= [] + num_results = response.rows.length + @logger.info "Results: #{num_results}, Start Index: #{start_index}" + @logger.flush + start_index += max_results + results = [] + 
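+          # prepend -1 as a placeholder for the acronym column so aggregate_results can read
+          # year/month/value at the same row positions as the per-ontology data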
response.rows.each do |row| + row_h = row.to_h + year_month_hits = row_h[:dimension_values].map{ |x| x[:value] } + [row_h[:metric_values].first[:value]] + results << ([-1] + year_month_hits) + end + aggregate_results(aggregated_results, results) + break if num_results < max_results + + end + {"all_users" => aggregated_results} + end + + + end + end +end + diff --git a/lib/ncbo_cron/config.rb b/lib/ncbo_cron/config.rb index 798768b2..f7e4acad 100644 --- a/lib/ncbo_cron/config.rb +++ b/lib/ncbo_cron/config.rb @@ -42,16 +42,8 @@ def config(&block) @settings.enable_spam_deletion ||= true # enable update check (vor VMs) @settings.enable_update_check ||= true - - - - # enable mgrep dictionary generation job - @settings.enable_dictionary_generation ||= true - - - - + @settings.enable_dictionary_generation_cron_job ||= false # UMLS auto-pull @settings.pull_umls_url ||= "" @@ -94,17 +86,9 @@ def config(&block) @settings.cron_obofoundry_sync ||= "0 8 * * 1,2,3,4,5" # 00 3 * * * - run daily at 3:00AM @settings.cron_update_check ||= "00 3 * * *" - - - - # mgrep dictionary generation schedule # 30 3 * * * - run daily at 3:30AM - @settings.cron_dictionary_generation ||= "30 3 * * *" - - - - + @settings.cron_dictionary_generation_cron_job ||= "30 3 * * *" @settings.log_level ||= :info unless (@settings.log_path && File.exists?(@settings.log_path)) diff --git a/lib/ncbo_cron/ontology_analytics.rb b/lib/ncbo_cron/ontology_analytics.rb deleted file mode 100644 index 097821fe..00000000 --- a/lib/ncbo_cron/ontology_analytics.rb +++ /dev/null @@ -1,133 +0,0 @@ -require 'logger' -require 'google/apis/analytics_v3' -require 'google/api_client/auth/key_utils' - -module NcboCron - module Models - - class OntologyAnalytics - ONTOLOGY_ANALYTICS_REDIS_FIELD = "ontology_analytics" - - def initialize(logger) - @logger = logger - end - - def run - redis = Redis.new(:host => NcboCron.settings.redis_host, :port => NcboCron.settings.redis_port) - ontology_analytics = fetch_ontology_analytics - redis.set(ONTOLOGY_ANALYTICS_REDIS_FIELD, Marshal.dump(ontology_analytics)) - end - - def fetch_ontology_analytics - google_client = authenticate_google - aggregated_results = Hash.new - start_year = Date.parse(NcboCron.settings.analytics_start_date).year || 2013 - ont_acronyms = LinkedData::Models::Ontology.where.include(:acronym).all.map {|o| o.acronym} - # ont_acronyms = ["NCIT", "ONTOMA", "CMPO", "AEO", "SNOMEDCT"] - filter_str = (NcboCron.settings.analytics_filter_str.nil? || NcboCron.settings.analytics_filter_str.empty?) ? "" : ";#{NcboCron.settings.analytics_filter_str}" - - # If the user add filter through the configuration file - if !NcboCron.settings.analytics_filter_str.nil? 
&& NcboCron.settings.analytics_filter_str != "" - analytics_filter = ";" + NcboCron.settings.analytics_filter_str - else - analytics_filter = "" - end - - ont_acronyms.each do |acronym| - max_results = 10000 - num_results = 10000 - start_index = 1 - results = nil - - loop do - results = google_client.get_ga_data( - ids = NcboCron.settings.analytics_profile_id, - start_date = NcboCron.settings.analytics_start_date, - end_date = Date.today.to_s, - metrics = 'ga:pageviews', - { - dimensions: 'ga:pagePath,ga:year,ga:month', - filters: "ga:pagePath=~^(\\/ontologies\\/#{acronym})(\\/?\\?{0}|\\/?\\?{1}.*)$#{filter_str}", - start_index: start_index, - max_results: max_results - } - ) - results.rows ||= [] - start_index += max_results - num_results = results.rows.length - @logger.info "Acronym: #{acronym}, Results: #{num_results}, Start Index: #{start_index}" - @logger.flush - - results.rows.each do |row| - if aggregated_results.has_key?(acronym) - # year - if aggregated_results[acronym].has_key?(row[1].to_i) - # month - if aggregated_results[acronym][row[1].to_i].has_key?(row[2].to_i) - aggregated_results[acronym][row[1].to_i][row[2].to_i] += row[3].to_i - else - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i - end - else - aggregated_results[acronym][row[1].to_i] = Hash.new - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i - end - else - aggregated_results[acronym] = Hash.new - aggregated_results[acronym][row[1].to_i] = Hash.new - aggregated_results[acronym][row[1].to_i][row[2].to_i] = row[3].to_i - end - end - - if num_results < max_results - # fill up non existent years - (start_year..Date.today.year).each do |y| - aggregated_results[acronym] = Hash.new if aggregated_results[acronym].nil? - aggregated_results[acronym][y] = Hash.new unless aggregated_results[acronym].has_key?(y) - end - # fill up non existent months with zeros - (1..12).each { |n| aggregated_results[acronym].values.each { |v| v[n] = 0 unless v.has_key?(n) } } - break - end - end - end - - @logger.info "Completed ontology analytics refresh..." - @logger.flush - - aggregated_results - end - - def authenticate_google - Google::Apis::ClientOptions.default.application_name = NcboCron.settings.analytics_app_name - Google::Apis::ClientOptions.default.application_version = NcboCron.settings.analytics_app_version - # enable google api call retries in order to - # minigate analytics processing failure due to ocasional google api timeouts and other outages - Google::Apis::RequestOptions.default.retries = 5 - # uncoment to enable logging for debugging purposes - # Google::Apis.logger.level = Logger::DEBUG - # Google::Apis.logger = @logger - client = Google::Apis::AnalyticsV3::AnalyticsService.new - key = Google::APIClient::KeyUtils::load_from_pkcs12(NcboCron.settings.analytics_path_to_key_file, 'notasecret') - client.authorization = Signet::OAuth2::Client.new( - :token_credential_uri => 'https://accounts.google.com/o/oauth2/token', - :audience => 'https://accounts.google.com/o/oauth2/token', - :scope => 'https://www.googleapis.com/auth/analytics.readonly', - :issuer => NcboCron.settings.analytics_service_account_email_address, - :signing_key => key - ).tap { |auth| auth.fetch_access_token! 
} - client - end - end - end -end - -# require 'ontologies_linked_data' -# require 'goo' -# require 'ncbo_annotator' -# require 'ncbo_cron/config' -# require_relative '../../config/config' -# ontology_analytics_log_path = File.join("logs", "ontology-analytics.log") -# ontology_analytics_logger = Logger.new(ontology_analytics_log_path) -# NcboCron::Models::OntologyAnalytics.new(ontology_analytics_logger).run -# ./bin/ncbo_cron --disable-processing true --disable-pull true --disable-flush true --disable-warmq true --disable-ontologies-report true --disable-mapping-counts true --disable-spam-deletion true --ontology-analytics '14 * * * *' diff --git a/lib/ncbo_cron/ontology_helper.rb b/lib/ncbo_cron/ontology_helper.rb new file mode 100644 index 00000000..4d1f0716 --- /dev/null +++ b/lib/ncbo_cron/ontology_helper.rb @@ -0,0 +1,186 @@ +require 'logger' + +module NcboCron + module Helpers + module OntologyHelper + + REDIS_SUBMISSION_ID_PREFIX = "sub:" + PROCESS_QUEUE_HOLDER = "parseQueue" + PROCESS_ACTIONS = { + :process_rdf => true, + :generate_labels => true, + :extract_metadata => true, + :index_search => true, + :index_properties => true, + :run_metrics => true, + :process_annotator => true, + :diff => true, + :remote_pull => false + } + + class RemoteFileException < StandardError + attr_reader :submission + + def initialize(submission) + super + @submission = submission + end + end + + def self.do_ontology_pull(ontology_acronym, enable_pull_umls = false, umls_download_url = '', logger = nil, + add_to_queue = true) + logger ||= Logger.new($stdout) + ont = LinkedData::Models::Ontology.find(ontology_acronym).include(:acronym).first + new_submission = nil + raise StandardError, "Ontology #{ontology_acronym} not found" if ont.nil? + + last = ont.latest_submission(status: :any) + raise StandardError, "No submission found for #{ontology_acronym}" if last.nil? + + last.bring(:hasOntologyLanguage) if last.bring?(:hasOntologyLanguage) + if !enable_pull_umls && last.hasOntologyLanguage.umls? + raise StandardError, "Pull umls not enabled" + end + + last.bring(:pullLocation) if last.bring?(:pullLocation) + raise StandardError, "#{ontology_acronym} has no pullLocation" if last.pullLocation.nil? + + last.bring(:uploadFilePath) if last.bring?(:uploadFilePath) + + if last.hasOntologyLanguage.umls? && umls_download_url && !umls_download_url.empty? + last.pullLocation = RDF::URI.new(umls_download_url + last.pullLocation.split("/")[-1]) + logger.info("Using alternative download for umls #{last.pullLocation.to_s}") + logger.flush + end + + if last.remote_file_exists?(last.pullLocation.to_s) + logger.info "Checking download for #{ont.acronym}" + logger.info "Location: #{last.pullLocation.to_s}"; logger.flush + file, filename = last.download_ontology_file + file, md5local, md5remote, new_file_exists = self.new_file_exists?(file, last) + + if new_file_exists + logger.info "New file found for #{ont.acronym}\nold: #{md5local}\nnew: #{md5remote}" + logger.flush() + new_submission = self.create_submission(ont, last, file, filename, logger, add_to_queue) + else + logger.info "There is no new file found for #{ont.acronym}" + logger.flush() + end + + file.close + new_submission + else + raise self::RemoteFileException.new(last) + end + end + + def self.create_submission(ont, sub, file, filename, logger = nil, add_to_queue = true, new_version = nil, + new_released = nil) + logger ||= Kernel.const_defined?("LOGGER") ? 
Kernel.const_get("LOGGER") : Logger.new(STDOUT) + new_sub = LinkedData::Models::OntologySubmission.new + + sub.bring_remaining + sub.loaded_attributes.each do |attr| + new_sub.send("#{attr}=", sub.send(attr)) + end + + submission_id = ont.next_submission_id() + new_sub.submissionId = submission_id + file_location = LinkedData::Models::OntologySubmission.copy_file_repository(ont.acronym, submission_id, file, filename) + new_sub.uploadFilePath = file_location + + unless new_version.nil? + new_sub.version = new_version + end + + if new_released.nil? + new_sub.released = DateTime.now + else + new_sub.released = DateTime.parse(new_released) + end + new_sub.submissionStatus = nil + new_sub.creationDate = nil + new_sub.missingImports = nil + new_sub.metrics = nil + full_file_path = File.expand_path(file_location) + + # check if OWLAPI is able to parse the file before creating a new submission + owlapi = LinkedData::Parser::OWLAPICommand.new( + full_file_path, + File.expand_path(new_sub.data_folder.to_s), + logger: logger) + owlapi.disable_reasoner + parsable = true + + begin + owlapi.parse + rescue Exception => e + logger.error("The new file for ontology #{ont.acronym}, submission id: #{submission_id} did not clear OWLAPI: #{e.class}: #{e.message}\n#{e.backtrace.join("\n\t")}") + logger.error("A new submission has NOT been created.") + logger.flush + parsable = false + end + + if parsable + if new_sub.valid? + new_sub.save() + + if add_to_queue + self.queue_submission(new_sub, { all: true }) + logger.info("OntologyPull created a new submission (#{submission_id}) for ontology #{ont.acronym}") + end + else + logger.error("Unable to create a new submission for ontology #{ont.acronym} with id #{submission_id}: #{new_sub.errors}") + logger.flush + end + else + # delete the bad file + File.delete full_file_path if File.exist? full_file_path + end + new_sub + end + + def self.queue_submission(submission, actions={:all => true}) + redis = Redis.new(:host => NcboCron.settings.redis_host, :port => NcboCron.settings.redis_port) + + if actions[:all] + actions = PROCESS_ACTIONS.dup + else + actions.delete_if {|k, v| !PROCESS_ACTIONS.has_key?(k)} + end + actionStr = MultiJson.dump(actions) + redis.hset(PROCESS_QUEUE_HOLDER, get_prefixed_id(submission.id), actionStr) unless actions.empty? 
+ end + + def self.get_prefixed_id(id) + "#{REDIS_SUBMISSION_ID_PREFIX}#{id}" + end + + def self.last_fragment_of_uri(uri) + uri.to_s.split("/")[-1] + end + + def self.acronym_from_submission_id(submissionID) + submissionID.to_s.split("/")[-3] + end + + def self.new_file_exists?(file, last) + file = File.open(file.path, "rb") + remote_contents = file.read + md5remote = Digest::MD5.hexdigest(remote_contents) + + if last.uploadFilePath && File.exist?(last.uploadFilePath) + file_contents = open(last.uploadFilePath) { |f| f.read } + md5local = Digest::MD5.hexdigest(file_contents) + new_file_exists = (not md5remote.eql?(md5local)) + else + # There is no existing file, so let's create a submission with the downloaded one + new_file_exists = true + end + return file, md5local, md5remote, new_file_exists + end + + end + end +end \ No newline at end of file diff --git a/lib/ncbo_cron/ontology_pull.rb b/lib/ncbo_cron/ontology_pull.rb index 1a6db3ad..87e57046 100644 --- a/lib/ncbo_cron/ontology_pull.rb +++ b/lib/ncbo_cron/ontology_pull.rb @@ -1,6 +1,5 @@ -require 'open-uri' require 'logger' -require_relative 'ontology_submission_parser' +require_relative 'ontology_helper' module NcboCron module Models diff --git a/lib/ncbo_cron/ontology_rank.rb b/lib/ncbo_cron/ontology_rank.rb index b60c2740..64de8844 100644 --- a/lib/ncbo_cron/ontology_rank.rb +++ b/lib/ncbo_cron/ontology_rank.rb @@ -1,5 +1,6 @@ require 'logger' require 'benchmark' +require_relative 'ontology_helper' module NcboCron module Models @@ -66,7 +67,7 @@ def umls_scores(ontologies) ontologies.each do |ont| if ont.group && !ont.group.empty? - umls_gr = ont.group.select {|gr| acronym_from_id(gr.id.to_s).include?('UMLS')} + umls_gr = ont.group.select {|gr| NcboCron::Helpers::OntologyHelper.last_fragment_of_uri(gr.id.to_s).include?('UMLS')} scores[ont.acronym] = umls_gr.empty? ? 0 : 1 else scores[ont.acronym] = 0 @@ -75,10 +76,6 @@ def umls_scores(ontologies) scores end - def acronym_from_id(id) - id.to_s.split("/")[-1] - end - def normalize(x, xmin, xmax, ymin, ymax) xrange = xmax - xmin yrange = ymax - ymin diff --git a/lib/ncbo_cron/ontology_submission_parser.rb b/lib/ncbo_cron/ontology_submission_parser.rb index b0a3309c..bd56699c 100644 --- a/lib/ncbo_cron/ontology_submission_parser.rb +++ b/lib/ncbo_cron/ontology_submission_parser.rb @@ -1,48 +1,22 @@ require 'multi_json' +require_relative 'ontology_helper' module NcboCron module Models class OntologySubmissionParser - QUEUE_HOLDER = "parseQueue" - IDPREFIX = "sub:" - - ACTIONS = { - :process_rdf => true, - :extract_metadata => true, - :index_search => true, - :index_properties => true, - :run_metrics => true, - :process_annotator => true, - :diff => true, - :params => nil - } + QUEUE_HOLDER = NcboCron::Helpers::OntologyHelper::PROCESS_QUEUE_HOLDER + ACTIONS = NcboCron::Helpers::OntologyHelper::PROCESS_ACTIONS def initialize() end - # Add a submission in the queue - def queue_submission(submission, actions={:all => true}) - redis = Redis.new(:host => NcboCron.settings.redis_host, :port => NcboCron.settings.redis_port) - if actions[:all] - if !actions[:params].nil? - # Retrieve params added by the user - user_params = actions[:params].dup - actions = ACTIONS.dup - actions[:params] = user_params.dup - else - actions = ACTIONS.dup - end - else - actions.delete_if {|k, v| !ACTIONS.has_key?(k)} - end - actionStr = MultiJson.dump(actions) - redis.hset(QUEUE_HOLDER, get_prefixed_id(submission.id), actionStr) unless actions.empty? 
+ def queue_submission(submission, actions={ :all => true }) + NcboCron::Helpers::OntologyHelper.queue_submission(submission, actions) end - # Process submissions waiting in the queue - def process_queue_submissions(options = {}) + def process_queue_submissions(options={}) logger = options[:logger] logger ||= Kernel.const_defined?("LOGGER") ? Kernel.const_get("LOGGER") : Logger.new(STDOUT) redis = Redis.new(:host => NcboCron.settings.redis_host, :port => NcboCron.settings.redis_port) @@ -53,6 +27,20 @@ def process_queue_submissions(options = {}) realKey = process_data[:key] key = process_data[:redis_key] redis.hdel(QUEUE_HOLDER, key) + + # if :remote_pull is one of the actions, pull the ontology and halt if no new submission is found + # if a new submission is found, replace the submission ID with the new one and proceed with + # processing the remaining actions on the new submission + if actions.key?(:remote_pull) && actions[:remote_pull] + acronym = NcboCron::Helpers::OntologyHelper.acronym_from_submission_id(realKey) + new_submission = NcboCron::Helpers::OntologyHelper.do_ontology_pull(acronym, false, + '', logger, + false) + return unless new_submission + realKey = new_submission.id.to_s + actions.delete(:remote_pull) + end + begin process_submission(logger, realKey, actions) rescue Exception => e @@ -65,7 +53,7 @@ def process_queue_submissions(options = {}) def queued_items(redis, logger=nil) logger ||= Kernel.const_defined?("LOGGER") ? Kernel.const_get("LOGGER") : Logger.new(STDOUT) all = redis.hgetall(QUEUE_HOLDER) - prefix_remove = Regexp.new(/^#{IDPREFIX}/) + prefix_remove = Regexp.new(/^#{NcboCron::Helpers::OntologyHelper::REDIS_SUBMISSION_ID_PREFIX}/) items = [] all.each do |key, val| begin @@ -85,11 +73,6 @@ def queued_items(redis, logger=nil) items end - def get_prefixed_id(id) - "#{IDPREFIX}#{id}" - end - - # Zombie graphs are submission graphs from ontologies that have been deleted def zombie_classes_graphs query = "SELECT DISTINCT ?g WHERE { GRAPH ?g { ?s ?p ?o }}" class_graphs = [] @@ -182,7 +165,7 @@ def process_submission(logger, submission_id, actions=ACTIONS) # Check to make sure the file has been downloaded if sub.pullLocation && (!sub.uploadFilePath || !File.exist?(sub.uploadFilePath)) - multi_logger.debug "Pull location found, but no file in the upload file path. Retrying download." + multi_logger.debug "Pull location found (#{sub.pullLocation}), but no file in the upload file path (#{sub.uploadFilePath}). Retrying download."
file, filename = sub.download_ontology_file file_location = sub.class.copy_file_repository(sub.ontology.acronym, sub.submissionId, file, filename) file_location = "../" + file_location if file_location.start_with?(".") # relative path fix @@ -207,6 +190,10 @@ def process_submission(logger, submission_id, actions=ACTIONS) end end + def get_prefixed_id(id) + NcboCron::Helpers::OntologyHelper.get_prefixed_id(id) + end + private def archive_old_submissions(logger, sub) @@ -237,10 +224,11 @@ def process_annotator(logger, sub) begin annotator = Annotator::Models::NcboAnnotator.new annotator.create_term_cache_for_submission(logger, sub) - # commenting this action out for now due to a problem with hgetall in redis + # this action only occurs if the CRON dictionary generation job is disabled + # if the CRON dictionary generation job is running, + # the dictionary will NOT be generated on each ontology parsing # see https://github.com/ncbo/ncbo_cron/issues/45 for details - # mgrep dictionary generation will occur as a separate CRON task - # annotator.generate_dictionary_file() + annotator.generate_dictionary_file() unless NcboCron.settings.enable_dictionary_generation_cron_job rescue Exception => e logger.error(e.message + "\n" + e.backtrace.join("\n\t")) logger.flush() diff --git a/lib/ncbo_cron/spam_deletion.rb b/lib/ncbo_cron/spam_deletion.rb index 8db5568b..e2ec64f8 100644 --- a/lib/ncbo_cron/spam_deletion.rb +++ b/lib/ncbo_cron/spam_deletion.rb @@ -25,8 +25,18 @@ def initialize(logger=nil) end def run - auth_token = Base64.decode64(NcboCron.settings.git_repo_access_token) + auth_token = NcboCron.settings.git_repo_access_token res = `curl --header 'Authorization: token #{auth_token}' --header 'Accept: application/vnd.github.v3.raw' --location #{FULL_FILE_PATH}` + + begin + error_json = JSON.parse(res) + msg = "\nError while fetching the SPAM user list from #{FULL_FILE_PATH}: #{error_json}" + @logger.error(msg) + puts msg + exit + rescue JSON::ParserError + @logger.info("Successfully downloaded the SPAM user list from #{FULL_FILE_PATH}") + end usernames = res.split(",").map(&:strip) delete_spam(usernames) end diff --git a/ncbo_cron.gemspec b/ncbo_cron.gemspec index 821881d1..c8faa03d 100644 --- a/ncbo_cron.gemspec +++ b/ncbo_cron.gemspec @@ -8,7 +8,7 @@ Gem::Specification.new do |gem| gem.summary = %q{} gem.homepage = "https://github.com/ncbo/ncbo_cron" - gem.files = `git ls-files`.split($\) + gem.files = Dir['**/*'] gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) gem.name = "ncbo_cron" @@ -16,7 +16,7 @@ Gem::Specification.new do |gem| gem.add_dependency("dante") gem.add_dependency("goo") - gem.add_dependency("google-apis-analytics_v3") + gem.add_dependency("google-analytics-data") gem.add_dependency("mlanett-redis-lock") gem.add_dependency("multi_json") gem.add_dependency("ncbo_annotator") diff --git a/rakelib/purl_management.rake b/rakelib/purl_management.rake new file mode 100644 index 00000000..58cfadd7 --- /dev/null +++ b/rakelib/purl_management.rake @@ -0,0 +1,28 @@ +# Task for updating and adding missing purl for all ontologies +# +desc 'Purl Utilities' +namespace :purl do + require 'bundler/setup' + # Configure the process for the current cron configuration. 
+ require_relative '../lib/ncbo_cron' + config_exists = File.exist?(File.expand_path('../../config/config.rb', __FILE__)) + abort('Please create a config/config.rb file using the config/config.rb.sample as a template') unless config_exists + require_relative '../config/config' + + desc 'update purl for all ontologies' + task :update_all do + purl_client = LinkedData::Purl::Client.new + LinkedData::Models::Ontology.all.each do |ont| + ont.bring(:acronym) + acronym = ont.acronym + + if purl_client.purl_exists(acronym) + puts "#{acronym} exists" + purl_client.fix_purl(acronym) + else + puts "#{acronym} DOES NOT exist" + purl_client.create_purl(acronym) + end + end + end +end diff --git a/test/docker-compose.yml b/test/docker-compose.yml deleted file mode 100644 index 251a526e..00000000 --- a/test/docker-compose.yml +++ /dev/null @@ -1,128 +0,0 @@ -x-app: &app - build: - context: ../. - args: - RUBY_VERSION: '2.7' - # Increase the version number in the image tag every time Dockerfile or its arguments is changed - image: ncbo_cron-dev:0.0.1 - environment: &env - # default bundle config resolves to /usr/local/bundle/config inside of the container - # we are setting it to local app directory if we need to use 'bundle config local' - BUNDLE_APP_CONFIG: /srv/ontoportal/ncbo_cron/.bundle - BUNDLE_PATH: /srv/ontoportal/bundle - COVERAGE: 'true' # enable simplecov code coverage - REDIS_HOST: redis-ut - REDIS_PORT: 6379 - SOLR_TERM_SEARCH_URL: http://solr-ut:8983/solr/term_search_core1 - SOLR_PROP_SEARCH_URL: http://solr-ut:8983/solr/prop_search_core1 - MGREP_HOST: mgrep-ut - MGREP_PORT: 55555 - stdin_open: true - tty: true - command: /bin/bash - volumes: - # bundle volume for hosting gems installed by bundle; it speeds up gem install in local development - - bundle:/srv/ontoportal/bundle - - ../.:/srv/ontoportal/ncbo_cron - # mount directory containing development version of the gems if you need to use 'bundle config local' - #- /Users/alexskr/ontoportal:/Users/alexskr/ontoportal - depends_on: &depends_on - solr-ut: - condition: service_started - redis-ut: - condition: service_started - mgrep-ut: - condition: service_healthy - - -services: - # environment wtih 4store backend - ruby: - <<: *app - environment: - <<: *env - GOO_BACKEND_NAME: 4store - GOO_PORT: 9000 - GOO_HOST: 4store-ut - GOO_PATH_QUERY: /sparql/ - GOO_PATH_DATA: /data/ - GOO_PATH_UPDATE: /update/ - profiles: - - 4store - depends_on: - <<: *depends_on - 4store-ut: - condition: service_started - - # environment with AllegroGraph backend - ruby-agraph: - <<: *app - environment: - <<: *env - GOO_BACKEND_NAME: ag - GOO_PORT: 10035 - GOO_HOST: agraph-ut - GOO_PATH_QUERY: /repositories/bioportal_test - GOO_PATH_DATA: /repositories/bioportal_test/statements - GOO_PATH_UPDATE: /repositories/bioportal_test/statements - # profiles: - #- agraph - depends_on: - <<: *depends_on - agraph-ut: - condition: service_started - - redis-ut: - image: redis - ports: - - 6379:6379 - - 4store-ut: - image: bde2020/4store - command: > - bash -c "4s-backend-setup --segments 4 ontoportal_kb - && 4s-backend ontoportal_kb - && 4s-httpd -D -s-1 -p 9000 ontoportal_kb" - profiles: - - 4store - - solr-ut: - image: solr:8 - volumes: - - ./solr/configsets:/configsets:ro - ports: - - "8983:8983" - command: > - bash -c "precreate-core term_search_core1 /configsets/term_search - && precreate-core prop_search_core1 /configsets/property_search - && solr-foreground" - - - mgrep-ut: - image: ontoportal/mgrep-ncbo:0.1 - healthcheck: - test: ["CMD", "nc", "-z", "-v", "localhost", 
"55555"] - start_period: 3s - interval: 10s - timeout: 5s - retries: 5 - - agraph-ut: - image: franzinc/agraph:v7.3.0 - environment: - - AGRAPH_SUPER_USER=test - - AGRAPH_SUPER_PASSWORD=xyzzy - shm_size: 1g - # ports: - # - 10035:10035 - command: > - bash -c "/agraph/bin/agraph-control --config /agraph/etc/agraph.cfg start - ; agtool repos create bioportal_test - ; agtool users add anonymous - ; agtool users grant anonymous root:bioportal_test:rw - ; tail -f /agraph/data/agraph.log" - # profiles: - #- agraph - -volumes: - bundle: diff --git a/test/test_case.rb b/test/test_case.rb index 5f164ecd..75bb0454 100644 --- a/test/test_case.rb +++ b/test/test_case.rb @@ -56,7 +56,7 @@ def count_pattern(pattern) return 0 end - def backend_4s_delete + def backend_triplestore_delete raise StandardError, 'Too many triples in KB, does not seem right to run tests' unless count_pattern('?s ?p ?o') < 400000 @@ -89,7 +89,7 @@ def _run_suites(suites, type) end def _run_suite(suite, type) - backend_4s_delete + backend_triplestore_delete suite.before_suite if suite.respond_to?(:before_suite) super(suite, type) rescue Exception => e @@ -98,7 +98,7 @@ def _run_suite(suite, type) puts 'Traced from:' raise e ensure - backend_4s_delete + backend_triplestore_delete suite.after_suite if suite.respond_to?(:after_suite) end end diff --git a/test/test_ontology_pull.rb b/test/test_ontology_pull.rb index 450ae582..fad8aef8 100644 --- a/test/test_ontology_pull.rb +++ b/test/test_ontology_pull.rb @@ -76,6 +76,32 @@ def test_remote_ontology_pull assert_equal 2, ont.submissions.length end + def test_remote_pull_parsing_action + ontologies = init_ontologies(1, process_submissions: true) + ont = LinkedData::Models::Ontology.find(ontologies[0].id).first + ont.bring(:submissions) if ont.bring?(:submissions) + assert_equal 1, ont.submissions.length + + # add this ontology to submission queue with :remote_pull action enabled + parser = NcboCron::Models::OntologySubmissionParser.new + actions = NcboCron::Models::OntologySubmissionParser::ACTIONS.dup + actions[:remote_pull] = true + parser.queue_submission(ont.submissions[0], actions) + parser.process_queue_submissions + + # make sure there are now 2 submissions present + ont = LinkedData::Models::Ontology.find(ontologies[0].id).first + ont.bring(:submissions) if ont.bring?(:submissions) + assert_equal 2, ont.submissions.length + + # verify that no new submission is created when the file has not changed + parser.queue_submission(ont.submissions[0], actions) + parser.process_queue_submissions + ont = LinkedData::Models::Ontology.find(ontologies[0].id).first + ont.bring(:submissions) if ont.bring?(:submissions) + assert_equal 2, ont.submissions.length + end + def test_pull_error_notification server_port = Random.rand(55000..65535) @@ -164,8 +190,9 @@ def test_no_pull_location private - def init_ontologies(submission_count) - ont_count, acronyms, ontologies = LinkedData::SampleData::Ontology.create_ontologies_and_submissions(ont_count: 1, submission_count: submission_count, process_submission: false) + def init_ontologies(submission_count, process_submissions = false) + ont_count, acronyms, ontologies = LinkedData::SampleData::Ontology.create_ontologies_and_submissions( + ont_count: 1, submission_count: submission_count, process_submission: process_submissions) ontologies[0].bring(:submissions) if ontologies[0].bring?(:submissions) ontologies[0].submissions.each do |sub| sub.bring_remaining() diff --git a/test/test_scheduler.rb b/test/test_scheduler.rb index bac2f842..58808ea5 100644 
--- a/test/test_scheduler.rb +++ b/test/test_scheduler.rb @@ -39,7 +39,7 @@ def test_scheduler sleep(5) finished_array = listen_string.split("\n") - assert finished_array.length >= 4 + assert_operator 4, :<=, finished_array.length assert job1_thread.alive? job1_thread.kill
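
A minimal usage sketch (not part of the patch) of the new :remote_pull queue action, mirroring test_remote_pull_parsing_action above. It assumes a checked-out ncbo_cron working directory with a populated config/config.rb and an ontology that already has a submission with a pullLocation; the acronym 'MY_ONT' is a placeholder.

  require_relative 'lib/ncbo_cron'
  require_relative 'config/config'

  # Look up an ontology that has at least one submission to pull.
  ont = LinkedData::Models::Ontology.find('MY_ONT').first
  ont.bring(:submissions) if ont.bring?(:submissions)

  # Enable :remote_pull on top of the default actions and queue the submission.
  parser  = NcboCron::Models::OntologySubmissionParser.new
  actions = NcboCron::Models::OntologySubmissionParser::ACTIONS.dup
  actions[:remote_pull] = true
  parser.queue_submission(ont.submissions.first, actions)

  # The worker pulls the remote file first; the remaining actions run only if a
  # new submission was actually created, otherwise processing stops there.
  parser.process_queue_submissions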
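The renamed dictionary-generation settings from lib/ncbo_cron/config.rb can be overridden in config/config.rb. A sketch, assuming the usual NcboCron.config block used by config/config.rb.sample; the values are illustrative (the patch defaults the flag to false and the schedule to 3:30AM daily).

  NcboCron.config do |config|
    # Generate the mgrep dictionary as a dedicated cron job instead of after
    # every ontology parse (process_annotator skips generation when this is true).
    config.enable_dictionary_generation_cron_job = true
    # 30 3 * * * - run daily at 3:30AM
    config.cron_dictionary_generation_cron_job = "30 3 * * *"
  end

With the flag left at its default of false, the dictionary file is regenerated at the end of each annotator run, as in the process_annotator change above.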