diff --git a/CHANGELOG b/CHANGELOG index c0eadf5b56f..ae86734434f 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -2,6 +2,14 @@ We follow the CalVer (https://calver.org/) versioning scheme: YY.MINOR.MICRO. +19.26.0 (2019-09-11) +=================== +- Create a custom through table for linking files and versions + for storing version names. Supports different versions of the same + file having different names. +- Update README for populating institutions. + + 19.25.0 (2019-09-05) =================== - Automate account deactivation if users have no content diff --git a/README-docker-compose.md b/README-docker-compose.md index dc0742940e1..d84a2670014 100644 --- a/README-docker-compose.md +++ b/README-docker-compose.md @@ -147,7 +147,7 @@ - `docker-compose run --rm web python manage.py migrate` - Populate institutions: - After resetting your database or with a new install you will need to populate the table of institutions. **You must have run migrations first.** - - `docker-compose run --rm web python -m scripts.populate_institutions test` + - `docker-compose run --rm web python -m scripts.populate_institutions -e test -a` - Populate preprint, registration, and collection providers: - After resetting your database or with a new install, the required providers and subjects will be created automatically **when you run migrations.** To create more: - `docker-compose run --rm web python manage.py populate_fake_providers` diff --git a/addons/base/views.py b/addons/base/views.py index be9fa2d755c..763aec065a8 100644 --- a/addons/base/views.py +++ b/addons/base/views.py @@ -37,7 +37,7 @@ from addons.base import signals as file_signals from addons.base.utils import format_last_known_metadata, get_mfr_url from osf import features -from osf.models import (BaseFileNode, TrashedFileNode, +from osf.models import (BaseFileNode, TrashedFileNode, BaseFileVersionsThrough, OSFUser, AbstractNode, Preprint, NodeLog, DraftRegistration, RegistrationSchema, Guid, FileVersionUserMetadata, 
FileVersion) @@ -875,6 +875,10 @@ def addon_view_file(auth, node, file_node, version): args={'url': download_url.url} ) + version_names = BaseFileVersionsThrough.objects.filter( + basefilenode_id=file_node.id + ).order_by('-fileversion_id').values_list('version_name', flat=True) + ret.update({ 'urls': { 'render': render_url.url, @@ -902,6 +906,7 @@ def addon_view_file(auth, node, file_node, version): 'allow_comments': file_node.provider in settings.ADDONS_COMMENTABLE, 'checkout_user': file_node.checkout._id if file_node.checkout else None, 'pre_reg_checkout': is_pre_reg_checkout(node, file_node), + 'version_names': list(version_names) }) ret.update(rubeus.collect_addon_assets(node)) diff --git a/addons/osfstorage/models.py b/addons/osfstorage/models.py index 43e521ed186..b52151d4684 100644 --- a/addons/osfstorage/models.py +++ b/addons/osfstorage/models.py @@ -313,7 +313,8 @@ def create_version(self, creator, location, metadata=None): version._find_matching_archive(save=False) version.save() - self.versions.add(version) + # Adds version to the list of file versions - using custom through table + self.add_version(version) self.save() return version diff --git a/addons/osfstorage/tests/test_models.py b/addons/osfstorage/tests/test_models.py index 4e7fb592aed..42147bf5768 100644 --- a/addons/osfstorage/tests/test_models.py +++ b/addons/osfstorage/tests/test_models.py @@ -299,11 +299,24 @@ def test_materialized_path_nested(self): def test_copy(self): to_copy = self.node_settings.get_root().append_file('Carp') copy_to = self.node_settings.get_root().append_folder('Cloud') + version = to_copy.create_version( + self.user, + { + 'service': 'cloud', + settings.WATERBUTLER_RESOURCE: 'osf', + 'object': '06d80e', + }, { + 'sha256': 'existing', + 'vault': 'the cloud', + 'archive': 'erchiv' + }) + assert_equal(to_copy.versions.first().get_basefilenode_version(to_copy).version_name, 'Carp') copied = to_copy.copy_under(copy_to) assert_not_equal(copied, to_copy) 
assert_equal(copied.parent, copy_to) + assert_equal(copied.versions.first().get_basefilenode_version(copied).version_name, 'Carp') assert_equal(to_copy.parent, self.node_settings.get_root()) def test_copy_node_file_to_preprint(self): @@ -347,7 +360,7 @@ def test_move_nested_between_regions(self): for _ in range(2): version = factories.FileVersionFactory(region=self.node_settings.region) - child.versions.add(version) + child.add_version(version) child.save() moved = to_move.move_under(move_to) @@ -361,8 +374,21 @@ def test_move_nested_between_regions(self): def test_copy_rename(self): to_copy = self.node_settings.get_root().append_file('Carp') copy_to = self.node_settings.get_root().append_folder('Cloud') + version = to_copy.create_version( + self.user, + { + 'service': 'cloud', + settings.WATERBUTLER_RESOURCE: 'osf', + 'object': '06d80e', + }, { + 'sha256': 'existing', + 'vault': 'the cloud', + 'archive': 'erchiv' + }) + assert_equal(to_copy.versions.first().get_basefilenode_version(to_copy).version_name, 'Carp') copied = to_copy.copy_under(copy_to, name='But') + assert_equal(copied.versions.first().get_basefilenode_version(copied).version_name, 'But') assert_equal(copied.name, 'But') assert_not_equal(copied, to_copy) @@ -381,12 +407,25 @@ def test_move(self): def test_move_and_rename(self): to_move = self.node_settings.get_root().append_file('Carp') + version = to_move.create_version( + self.user, + { + 'service': 'cloud', + settings.WATERBUTLER_RESOURCE: 'osf', + 'object': '06d80e', + }, { + 'sha256': 'existing', + 'vault': 'the cloud', + 'archive': 'erchiv' + }) move_to = self.node_settings.get_root().append_folder('Cloud') + assert_equal(to_move.versions.first().get_basefilenode_version(to_move).version_name, 'Carp') moved = to_move.move_under(move_to, name='Tuna') assert_equal(to_move, moved) assert_equal(to_move.name, 'Tuna') + assert_equal(moved.versions.first().get_basefilenode_version(moved).version_name, 'Tuna') assert_equal(moved.parent, move_to) def 
test_move_preprint_primary_file_to_node(self): @@ -649,7 +688,7 @@ def test_after_fork_copies_versions(self): for _ in range(num_versions): version = factories.FileVersionFactory() - record.versions.add(version) + record.add_version(version) fork = self.project.fork_node(self.auth_obj) fork_node_settings = fork.get_addon('osfstorage') diff --git a/addons/osfstorage/tests/test_utils.py b/addons/osfstorage/tests/test_utils.py index 7675db8bf36..7e5e82d2711 100644 --- a/addons/osfstorage/tests/test_utils.py +++ b/addons/osfstorage/tests/test_utils.py @@ -12,6 +12,7 @@ from addons.osfstorage import utils from addons.osfstorage.tests.utils import StorageTestCase +from website.files.utils import attach_versions @pytest.mark.django_db @@ -25,7 +26,7 @@ def setUp(self): factories.FileVersionFactory(creator=self.user) for __ in range(3) ] - self.record.versions = self.versions + attach_versions(self.record, self.versions) self.record.save() def test_serialize_revision(self): diff --git a/addons/osfstorage/tests/test_views.py b/addons/osfstorage/tests/test_views.py index 4fc86d2c5bb..396d5aff6d3 100644 --- a/addons/osfstorage/tests/test_views.py +++ b/addons/osfstorage/tests/test_views.py @@ -31,17 +31,18 @@ from osf.models import files as models from addons.osfstorage.apps import osf_storage_root from addons.osfstorage import utils -from addons.base.views import make_auth +from addons.base.views import make_auth, addon_view_file from addons.osfstorage import settings as storage_settings -from api_tests.utils import create_test_file +from api_tests.utils import create_test_file, create_test_preprint_file from api.caching.settings import STORAGE_USAGE_KEY from osf_tests.factories import ProjectFactory, ApiOAuth2PersonalTokenFactory, PreprintFactory +from website.files.utils import attach_versions def create_record_with_version(path, node_settings, **kwargs): version = factories.FileVersionFactory(**kwargs) record = node_settings.get_root().append_file(path) - 
record.versions.add(version) + record.add_version(version) record.save() return record @@ -76,7 +77,7 @@ def test_file_metdata(self): path = u'kind/of/magíc.mp3' record = recursively_create_file(self.node_settings, path) version = factories.FileVersionFactory() - record.versions.add(version) + record.add_version(version) record.save() res = self.send_hook( 'osfstorage_get_metadata', @@ -91,7 +92,7 @@ def test_preprint_primary_file_metadata(self): preprint = PreprintFactory() record = preprint.primary_file version = factories.FileVersionFactory() - record.versions.add(version) + record.add_version(version) record.save() res = self.send_hook( 'osfstorage_get_metadata', @@ -106,7 +107,7 @@ def test_children_metadata(self): path = u'kind/of/magíc.mp3' record = recursively_create_file(self.node_settings, path) version = factories.FileVersionFactory() - record.versions.add(version) + record.add_version(version) record.save() res = self.send_hook( 'osfstorage_get_children', @@ -141,7 +142,7 @@ def test_children_metadata_preprint(self): preprint = PreprintFactory() record = preprint.primary_file version = factories.FileVersionFactory() - record.versions.add(version) + record.add_version(version) record.save() res = self.send_hook( 'osfstorage_get_children', @@ -274,7 +275,9 @@ def test_upload_create(self): assert_is_not(version, None) assert_equal([version], list(record.versions.all())) assert_not_in(version, self.record.versions.all()) + assert_equal(version.get_basefilenode_version(record).version_name, record.name) assert_equal(record.serialize(), res.json['data']) + assert_equal(version.get_basefilenode_version(record).version_name, record.name) assert_equal(res.json['data']['downloads'], self.record.get_download_count()) def test_upload_update(self): @@ -321,6 +324,7 @@ def test_upload_create_child(self): record = parent.find_child_by_name(name) assert_in(version, record.versions.all()) assert_equals(record.name, name) + 
assert_equals(record.versions.first().get_basefilenode_version(record).version_name, name) assert_equals(record.parent, parent) def test_upload_create_child_with_same_name(self): @@ -341,6 +345,7 @@ def test_upload_create_child_with_same_name(self): record = parent.find_child_by_name(name) assert_in(version, record.versions.all()) assert_equals(record.name, name) + assert_equals(record.versions.first().get_basefilenode_version(record).version_name, name) assert_equals(record.parent, parent) def test_upload_fail_to_create_version_due_to_checkout(self): @@ -514,6 +519,7 @@ def test_upload_update(self): version = models.FileVersion.load(res.json['version']) assert_is_not(version, None) assert_in(version, self.record.versions.all()) + assert_equal(self.record.versions.first().get_basefilenode_version(self.record).version_name, self.name) def test_upload_duplicate(self): location = { @@ -637,7 +643,7 @@ def setUp(self): self.path = 'greasy/pízza.png' self.record = recursively_create_file(self.node_settings, self.path) self.version = factories.FileVersionFactory() - self.record.versions = [self.version] + self.record.add_version(self.version) self.record.save() self.payload = { 'metadata': { @@ -711,7 +717,7 @@ def setUp(self): self.record = self.preprint.primary_file self.path = 'greasy/pízza.png' self.version = factories.FileVersionFactory() - self.record.versions = [self.version] + self.record.add_version(self.version) self.record.save() self.payload = { 'metadata': { @@ -783,7 +789,7 @@ def setUp(self): super(TestGetRevisions, self).setUp() self.path = 'tie/your/mother/down.mp3' self.record = recursively_create_file(self.node_settings, self.path) - self.record.versions = [factories.FileVersionFactory() for __ in range(15)] + attach_versions(self.record, [factories.FileVersionFactory() for __ in range(15)]) self.record.save() def get_revisions(self, fid=None, guid=None, **kwargs): @@ -1165,6 +1171,35 @@ def test_move_file_out_of_node(self): ) 
assert_equal(res.status_code, 200) + def test_can_rename_file(self): + file = create_test_file(self.node, self.user, filename='road_dogg.mp3') + new_name = 'JesseJames.mp3' + + res = self.send_hook( + 'osfstorage_move_hook', + {'guid': self.node._id}, + payload={ + 'action': 'rename', + 'source': file._id, + 'target': self.root_node._id, + 'user': self.user._id, + 'name': file.name, + 'destination': { + 'parent': self.root_node._id, + 'target': self.node._id, + 'name': new_name, + } + }, + target=self.node, + method='post_json', + expect_errors=True, + ) + file.reload() + + assert_equal(res.status_code, 200) + assert_equal(file.name, new_name) + assert_equal(file.versions.first().get_basefilenode_version(file).version_name, new_name) + def test_can_move_file_out_of_quickfiles_node(self): quickfiles_node = QuickFilesNode.objects.get_for_user(self.user) quickfiles_file = create_test_file(quickfiles_node, self.user, filename='slippery.mp3') @@ -1254,6 +1289,35 @@ def test_move_primary_file_out_of_preprint(self): ) assert_equal(res.status_code, 403) + def test_can_rename_file(self): + file = create_test_preprint_file(self.node, self.user, filename='road_dogg.mp3') + new_name = 'JesseJames.mp3' + + res = self.send_hook( + 'osfstorage_move_hook', + {'guid': self.node._id}, + payload={ + 'action': 'rename', + 'source': file._id, + 'target': self.root_node._id, + 'user': self.user._id, + 'name': file.name, + 'destination': { + 'parent': self.root_node._id, + 'target': self.node._id, + 'name': new_name, + } + }, + target=self.node, + method='post_json', + expect_errors=True, + ) + file.reload() + + assert_equal(res.status_code, 200) + assert_equal(file.name, new_name) + assert_equal(file.versions.first().get_basefilenode_version(file).version_name, new_name) + @pytest.mark.django_db class TestMoveHookProjectsOnly(TestMoveHook): @@ -1507,7 +1571,6 @@ def test_file_remove_tag_fail_doesnt_create_log(self, mock_log): @pytest.mark.django_db @pytest.mark.enable_bookmark_creation 
class TestFileViews(StorageTestCase): - def test_file_views(self): file = create_test_file(target=self.node, user=self.user) url = self.node.web_url_for('addon_view_or_download_file', path=file._id, provider=file.provider) @@ -1553,6 +1616,40 @@ def test_download_file(self): redirect = self.app.get(url, auth=self.user.auth, expect_errors=True) assert redirect.status_code == 400 + def test_addon_view_file(self): + file = create_test_file(target=self.node, user=self.user, filename='first_name') + version = factories.FileVersionFactory() + file.add_version(version) + file.move_under(self.node_settings.get_root(), name='second_name') + file.save() + + version = factories.FileVersionFactory() + file.add_version(version) + file.move_under(self.node_settings.get_root(), name='third_name') + file.save() + + ret = addon_view_file(Auth(self.user), self.node, file, version) + assert ret['version_names'] == ['third_name', 'second_name', 'first_name'] + + def test_osfstorage_download_view(self): + file = create_test_file(target=self.node, user=self.user) + version = factories.FileVersionFactory() + file.add_version(version) + file.move_under(self.node_settings.get_root(), name='new_name') + file.save() + + res = self.app.get( + api_url_for( + 'osfstorage_download', + fid=file._id, + guid=self.node._id, + **signing.sign_data(signing.default_signer, {}) + ), + auth=self.user.auth, + ) + assert res.status_code == 200 + assert res.json['data']['name'] == 'new_name' + @responses.activate @mock.patch('framework.auth.cas.get_client') def test_download_file_with_token(self, mock_get_client): diff --git a/addons/osfstorage/views.py b/addons/osfstorage/views.py index 9d9c07b8bbb..8c5593946c9 100644 --- a/addons/osfstorage/views.py +++ b/addons/osfstorage/views.py @@ -192,7 +192,7 @@ def osfstorage_get_children(file_node, **kwargs): , 'kind', 'file' , 'size', LATEST_VERSION.size , 'downloads', COALESCE(DOWNLOAD_COUNT, 0) - , 'version', (SELECT COUNT(*) FROM osf_basefilenode_versions WHERE 
osf_basefilenode_versions.basefilenode_id = F.id) + , 'version', (SELECT COUNT(*) FROM osf_basefileversionsthrough WHERE osf_basefileversionsthrough.basefilenode_id = F.id) , 'contentType', LATEST_VERSION.content_type , 'modified', LATEST_VERSION.created , 'created', EARLIEST_VERSION.created @@ -213,15 +213,15 @@ def osfstorage_get_children(file_node, **kwargs): FROM osf_basefilenode AS F LEFT JOIN LATERAL ( SELECT * FROM osf_fileversion - JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id - WHERE osf_basefilenode_versions.basefilenode_id = F.id + JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id + WHERE osf_basefileversionsthrough.basefilenode_id = F.id ORDER BY created DESC LIMIT 1 ) LATEST_VERSION ON TRUE LEFT JOIN LATERAL ( SELECT * FROM osf_fileversion - JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id - WHERE osf_basefilenode_versions.basefilenode_id = F.id + JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id + WHERE osf_basefileversionsthrough.basefilenode_id = F.id ORDER BY created ASC LIMIT 1 ) EARLIEST_VERSION ON TRUE @@ -240,9 +240,9 @@ def osfstorage_get_children(file_node, **kwargs): SELECT EXISTS( SELECT (1) FROM osf_fileversionusermetadata INNER JOIN osf_fileversion ON osf_fileversionusermetadata.file_version_id = osf_fileversion.id - INNER JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id + INNER JOIN osf_basefileversionsthrough ON osf_fileversion.id = osf_basefileversionsthrough.fileversion_id WHERE osf_fileversionusermetadata.user_id = %s - AND osf_basefilenode_versions.basefilenode_id = F.id + AND osf_basefileversionsthrough.basefilenode_id = F.id LIMIT 1 ) ) SEEN_FILE ON TRUE @@ -336,6 +336,7 @@ def osfstorage_create_child(file_node, payload, **kwargs): )) except KeyError: raise HTTPError(httplib.BAD_REQUEST) + 
current_version = file_node.get_version() new_version = file_node.create_version(user, location, metadata) @@ -403,9 +404,12 @@ def osfstorage_download(file_node, payload, **kwargs): raise make_error(httplib.BAD_REQUEST, message_short='Version must be an integer if not specified') version = file_node.get_version(version_id, required=True) + file_version_thru = version.get_basefilenode_version(file_node) + name = file_version_thru.version_name if file_version_thru else file_node.name + return { 'data': { - 'name': file_node.name, + 'name': name, 'path': version.location_hash, }, 'settings': { diff --git a/api/files/serializers.py b/api/files/serializers.py index 4e4bbbd5501..2e2782a4850 100644 --- a/api/files/serializers.py +++ b/api/files/serializers.py @@ -410,6 +410,7 @@ class FileVersionSerializer(JSONAPISerializer): size = ser.IntegerField(read_only=True, help_text='The size of this file at this version') content_type = ser.CharField(read_only=True, help_text='The mime type of this file at this verison') date_created = VersionedDateTimeField(source='created', read_only=True, help_text='The date that this version was created') + name = ser.SerializerMethodField() links = LinksField({ 'self': 'self_url', 'html': 'absolute_url', @@ -417,6 +418,10 @@ class FileVersionSerializer(JSONAPISerializer): 'render': 'get_render_link', }) + def get_name(self, obj): + file = self.context['file'] + return obj.get_basefilenode_version(file).version_name + class Meta: type_ = 'file_versions' diff --git a/api_tests/files/views/test_file_detail.py b/api_tests/files/views/test_file_detail.py index 78d36975af2..b5914669150 100644 --- a/api_tests/files/views/test_file_detail.py +++ b/api_tests/files/views/test_file_detail.py @@ -674,7 +674,9 @@ def test_listing(self, app, user, file): assert res.status_code == 200 assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == '2' + assert res.json['data'][0]['attributes']['name'] == file.name assert res.json['data'][1]['id'] 
== '1' + assert res.json['data'][1]['attributes']['name'] == file.name def test_load_and_property(self, app, user, file): # test_by_id diff --git a/osf/management/commands/data_storage_usage.py b/osf/management/commands/data_storage_usage.py index 4d557bd9c9e..5f68baaa62d 100644 --- a/osf/management/commands/data_storage_usage.py +++ b/osf/management/commands/data_storage_usage.py @@ -48,7 +48,7 @@ LAST_ROW_SQL = """ SELECT obfnv.id AS fileversion_id - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv ORDER BY obfnv.id DESC LIMIT 1 """ @@ -73,7 +73,7 @@ node.is_deleted AS node_deleted, node.spam_status, preprint.id IS NOT NULL AS is_supplementary_node - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN addons_osfstorage_region region ON version.region_id = region.id @@ -89,7 +89,7 @@ TOTAL_FILE_SIZE_SUM_SQL = """ SELECT 'total', sum(size) AS deleted_size_sum - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id WHERE file.provider = 'osfstorage' @@ -100,7 +100,7 @@ DELETED_FILE_SIZE_SUM_SQL = """ SELECT 'deleted', sum(size) AS deleted_size_sum - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id WHERE file.provider = 'osfstorage' @@ -112,7 +112,7 @@ REGIONAL_NODE_SIZE_SUM_SQL = """ SELECT region.name, sum(size) - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id 
= version.id LEFT JOIN addons_osfstorage_region region ON version.region_id = region.id @@ -128,7 +128,7 @@ node.type = 'osf.node' AND NOT node.is_public ) THEN 'osf.private-node' ELSE node.type END AS type, sum(size) - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN osf_abstractnode node ON file.target_object_id = node.id @@ -144,7 +144,7 @@ ND_QUICK_FILE_SIZE_SUM_SQL = """ SELECT node.type, sum(size) - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN osf_abstractnode node ON file.target_object_id = node.id @@ -161,7 +161,7 @@ ND_PREPRINT_SUPPLEMENT_SIZE_SUM_SQL = """ SELECT 'nd_supplement', sum(size) AS supplementary_node_size - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN osf_abstractnode node ON node.id = file.target_object_id @@ -193,7 +193,7 @@ preprint.deleted IS NOT NULL AS preprint_deleted, preprint.spam_status, FALSE AS is_supplementary_node - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN addons_osfstorage_region region ON version.region_id = region.id @@ -207,7 +207,7 @@ ND_PREPRINT_SIZE_SUM_SQL = """ SELECT 'nd_preprints', sum(size) AS nd_preprint_size_sum - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN 
osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN osf_preprint preprint ON preprint.id = file.target_object_id @@ -220,7 +220,7 @@ REGIONAL_PREPRINT_SIZE_SUM_SQL = """ SELECT region.name, sum(size) - FROM osf_basefilenode_versions AS obfnv + FROM osf_basefileversionsthrough AS obfnv LEFT JOIN osf_basefilenode file ON obfnv.basefilenode_id = file.id LEFT JOIN osf_fileversion version ON obfnv.fileversion_id = version.id LEFT JOIN addons_osfstorage_region region ON version.region_id = region.id diff --git a/osf/management/commands/force_archive.py b/osf/management/commands/force_archive.py index 82188459959..d066830a050 100644 --- a/osf/management/commands/force_archive.py +++ b/osf/management/commands/force_archive.py @@ -38,6 +38,7 @@ from scripts import utils as script_utils from website.archiver import ARCHIVER_SUCCESS from website.settings import ARCHIVE_TIMEOUT_TIMEDELTA, ARCHIVE_PROVIDER +from website.files.utils import attach_versions logger = logging.getLogger(__name__) @@ -197,7 +198,7 @@ def manually_archive(tree, reg, node_settings, parent=None): if file_obj.versions.exists() and filenode['version']: # Min version identifier is 1 if not cloned.versions.filter(identifier=filenode['version']).exists(): - cloned.versions.add(*file_obj.versions.filter(identifier__lte=filenode['version'])) + attach_versions(cloned, file_obj.versions.filter(identifier__lte=filenode['version']), file_obj) if filenode.get('children'): manually_archive(filenode['children'], reg, node_settings, parent=cloned) diff --git a/osf/migrations/0182_add_custom_file_versions_through.py b/osf/migrations/0182_add_custom_file_versions_through.py new file mode 100644 index 00000000000..e4d5f1b19b6 --- /dev/null +++ b/osf/migrations/0182_add_custom_file_versions_through.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.15 on 2019-04-07 23:20 +from __future__ import unicode_literals + +from django.db import migrations, models +import 
django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('osf', '0181_osfuser_contacted_deactivation'), + ] + + operations = [ + migrations.CreateModel( + name='BaseFileVersionsThrough', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('version_name', models.TextField(blank=True)), + ('basefilenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.BaseFileNode')), + ('fileversion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.FileVersion')), + ], + ), + migrations.AlterUniqueTogether( + name='basefileversionsthrough', + unique_together=set([('basefilenode', 'fileversion')]), + ), + migrations.AlterIndexTogether( + name='basefileversionsthrough', + index_together=set([('basefilenode', 'fileversion')]), + ), + ] diff --git a/osf/migrations/0183_populate_file_versions_through.py b/osf/migrations/0183_populate_file_versions_through.py new file mode 100644 index 00000000000..703c2767588 --- /dev/null +++ b/osf/migrations/0183_populate_file_versions_through.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.15 on 2019-03-03 17:52 +from __future__ import unicode_literals + +import logging + +from django.db import migrations, connection + +logger = logging.getLogger(__file__) + + +def restore_default_through_table(state, schema): + sql = """ + DROP TABLE osf_basefilenode_versions; + CREATE TABLE osf_basefilenode_versions AS + SELECT + new_thru.basefilenode_id, + new_thru.fileversion_id + FROM + osf_basefileversionsthrough AS new_thru; + + ALTER TABLE osf_basefilenode_versions ADD COLUMN id SERIAL PRIMARY KEY; + ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenod_basefilenode_id_b0knah27_fk_osf_basefilenode_id FOREIGN KEY (basefilenode_id) REFERENCES osf_basefilenode DEFERRABLE INITIALLY DEFERRED; + ALTER TABLE osf_basefilenode_versions ALTER COLUMN basefilenode_id + SET + DATA TYPE 
INTEGER; + ALTER TABLE osf_basefilenode_versions ALTER COLUMN fileversion_id + SET + NOT NULL; + ALTER TABLE osf_basefilenode_versions ALTER COLUMN fileversion_id + SET + DATA TYPE INTEGER; + ALTER TABLE osf_basefilenode_versions ALTER COLUMN basefilenode_id + SET + NOT NULL; + ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenode__fileversion_id_93etanfc_fk_osf_fileversion_id FOREIGN KEY (fileversion_id) REFERENCES osf_fileversion DEFERRABLE INITIALLY DEFERRED; + ALTER TABLE osf_basefilenode_versions ADD CONSTRAINT osf_basefilenode__fileversion_uniq564 UNIQUE (basefilenode_id, fileversion_id); + CREATE INDEX + ON osf_basefilenode_versions (basefilenode_id, fileversion_id); + CREATE INDEX + ON osf_basefilenode_versions (basefilenode_id); + CREATE INDEX + ON osf_basefilenode_versions (fileversion_id); + """ + with connection.cursor() as cursor: + cursor.execute(sql) + + +def populate_fileversion_name(state, schema): + + sql = """ + DROP TABLE osf_basefileversionsthrough; + CREATE TABLE osf_basefileversionsthrough AS + SELECT + obfv.basefilenode_id, + obfv.fileversion_id, + ob.name as version_name + FROM + osf_basefilenode_versions obfv + LEFT JOIN + osf_basefilenode ob + ON obfv.basefilenode_id = ob.id; + ALTER TABLE osf_basefileversionsthrough ADD COLUMN id SERIAL PRIMARY KEY; + ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT osf_basefilenod_basefilenode_id_b0nwad27_fk_osf_basefilenode_id FOREIGN KEY (basefilenode_id) REFERENCES osf_basefilenode DEFERRABLE INITIALLY DEFERRED; + ALTER TABLE osf_basefileversionsthrough ALTER COLUMN basefilenode_id + SET + DATA TYPE INTEGER; + ALTER TABLE osf_basefileversionsthrough ALTER COLUMN fileversion_id + SET + NOT NULL; + ALTER TABLE osf_basefileversionsthrough ALTER COLUMN fileversion_id + SET + DATA TYPE INTEGER; + ALTER TABLE osf_basefileversionsthrough ALTER COLUMN basefilenode_id + SET + NOT NULL; + ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT 
osf_basefilenode__fileversion_id_93nwadfc_fk_osf_fileversion_id FOREIGN KEY (fileversion_id) REFERENCES osf_fileversion DEFERRABLE INITIALLY DEFERRED; + ALTER TABLE osf_basefileversionsthrough ADD CONSTRAINT osf_basefilenode__fileversion_uniq UNIQUE (basefilenode_id, fileversion_id); + CREATE INDEX + ON osf_basefileversionsthrough (basefilenode_id, fileversion_id); + CREATE INDEX + ON osf_basefileversionsthrough (basefilenode_id); + CREATE INDEX + ON osf_basefileversionsthrough (fileversion_id); + """ + + with connection.cursor() as cursor: + cursor.execute(sql) + +class Migration(migrations.Migration): + + dependencies = [ + ('osf', '0182_add_custom_file_versions_through'), + ] + + operations = [ + migrations.RunPython(populate_fileversion_name, restore_default_through_table) + ] diff --git a/osf/migrations/0184_remove_basefilenode_versions.py b/osf/migrations/0184_remove_basefilenode_versions.py new file mode 100644 index 00000000000..7218a83c71a --- /dev/null +++ b/osf/migrations/0184_remove_basefilenode_versions.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.15 on 2019-04-08 00:51 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('osf', '0183_populate_file_versions_through'), + ] + + operations = [ + migrations.RemoveField( + model_name='basefilenode', + name='versions', + ), + ] diff --git a/osf/migrations/0185_basefilenode_versions.py b/osf/migrations/0185_basefilenode_versions.py new file mode 100644 index 00000000000..c910787a7c4 --- /dev/null +++ b/osf/migrations/0185_basefilenode_versions.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.15 on 2019-04-08 00:52 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('osf', '0184_remove_basefilenode_versions'), + ] + + operations = [ + migrations.AddField( + 
model_name='basefilenode', + name='versions', + field=models.ManyToManyField(through='osf.BaseFileVersionsThrough', to='osf.FileVersion'), + ), + ] diff --git a/osf/models/__init__.py b/osf/models/__init__.py index 45519591223..b4caeab6ff4 100644 --- a/osf/models/__init__.py +++ b/osf/models/__init__.py @@ -31,6 +31,7 @@ from osf.models.identifiers import Identifier # noqa from osf.models.files import ( # noqa BaseFileNode, + BaseFileVersionsThrough, File, Folder, # noqa FileVersion, TrashedFile, TrashedFileNode, TrashedFolder, FileVersionUserMetadata, # noqa ) # noqa diff --git a/osf/models/files.py b/osf/models/files.py index 63884ce8acd..2f143cf109b 100644 --- a/osf/models/files.py +++ b/osf/models/files.py @@ -89,7 +89,7 @@ class BaseFileNode(TypedModel, CommentableMixin, OptionalGuidMixin, Taggable, Ob # Add regardless it can be pinned to a version or not _history = DateTimeAwareJSONField(default=list, blank=True) # A concrete version of a FileNode, must have an identifier - versions = models.ManyToManyField('FileVersion') + versions = models.ManyToManyField('FileVersion', through='BaseFileVersionsThrough') target_content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) target_object_id = models.PositiveIntegerField() @@ -234,6 +234,18 @@ def to_storage(self, **kwargs): storage.pop(key) return storage + def add_version(self, version, name=None): + """ + Relates the file object to the version object. + :param version: Version object + :param name: Name, optional. 
Pass in if this version needs to have + a different name than the file + :return: Returns version that was passed in + """ + version_name = name or self.name + BaseFileVersionsThrough.objects.create(fileversion=version, basefilenode=self, version_name=version_name) + return version + @classmethod def files_checked_out(cls, user): """ @@ -359,10 +371,18 @@ def copy_under(self, destination_parent, name=None): return utils.copy_files(self, destination_parent.target, destination_parent, name=name) def move_under(self, destination_parent, name=None): + renaming = name != self.name self.name = name or self.name self.parent = destination_parent self._update_node(save=True) # Trust _update_node to save us + if renaming and self.is_file and self.versions.exists(): + newest_version = self.versions.first() + node_file_version = newest_version.get_basefilenode_version(self) + # Rename version in through table + node_file_version.version_name = self.name + node_file_version.save() + return self def belongs_to_node(self, target_id): @@ -443,6 +463,7 @@ def __repr__(self): self.target ) + class UnableToRestore(Exception): pass @@ -458,7 +479,7 @@ def kind(self): def update(self, revision, data, user=None, save=True): """Using revision and data update all data pretaining to self :param str or None revision: The revision that data points to - :param dict data: Metadata recieved from waterbutler + :param dict data: Metadata received from waterbutler :returns: FileVersion """ self.name = data['name'] @@ -479,7 +500,8 @@ def update(self, revision, data, user=None, save=True): # Dont save the latest information if revision is not None: version.save() - self.versions.add(version) + # Adds version to the list of file versions - using custom through table + self.add_version(version) for entry in self.history: # Some entry might have an undefined modified field if data['modified'] is not None and entry['modified'] is not None and data['modified'] < entry['modified']: @@ -750,6 +772,11 @@ 
def archive(self): def is_duplicate(self, other): return self.location_hash == other.location_hash + def get_basefilenode_version(self, file): + # Returns the through-table object - the record that links this version + # to the given file. + return self.basefileversionsthrough_set.filter(basefilenode=file).first() + def update_metadata(self, metadata, save=True): self.metadata.update(metadata) # metadata has no defined structure so only attempt to set attributes @@ -806,3 +833,15 @@ def serialize_waterbutler_settings(self, node_id, root_id): class Meta: ordering = ('-created',) + + +class BaseFileVersionsThrough(models.Model): + basefilenode = models.ForeignKey(BaseFileNode, db_index=True) + fileversion = models.ForeignKey(FileVersion, db_index=True) + version_name = models.TextField(blank=True) + + class Meta: + unique_together = (('basefilenode', 'fileversion'),) + index_together = ( + ('basefilenode', 'fileversion', ) + ) diff --git a/package.json b/package.json index c5f42c3ccf2..bc9777b8ec4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "OSF", - "version": "19.25.0", + "version": "19.26.0", "description": "Facilitating Open Science", "repository": "https://github.com/CenterForOpenScience/osf.io", "author": "Center for Open Science", diff --git a/tests/test_addons.py b/tests/test_addons.py index f5b94be4730..291207e0daf 100644 --- a/tests/test_addons.py +++ b/tests/test_addons.py @@ -171,7 +171,7 @@ def test_action_downloads_marks_version_as_seen(self): # Add a new version, make sure that does not have a record version = FileVersionFactory() - test_file.versions.add(version) + test_file.add_version(version) test_file.save() versions = test_file.versions.order_by('created') @@ -738,7 +738,7 @@ def get_test_file(self): materialized_path='/test/Test', ) ret.save() - ret.versions.add(version) + ret.add_version(version) return ret def get_second_test_file(self): @@ -751,7 +751,7 @@
materialized_path='/test/Test2', ) ret.save() - ret.versions.add(version) + ret.add_version(version) return ret def get_uppercased_ext_test_file(self): @@ -764,7 +764,7 @@ def get_uppercased_ext_test_file(self): materialized_path='/test/Test2', ) ret.save() - ret.versions.add(version) + ret.add_version(version) return ret def get_ext_test_file(self): @@ -777,7 +777,7 @@ def get_ext_test_file(self): materialized_path='/test/Test2', ) ret.save() - ret.versions.add(version) + ret.add_version(version) return ret def get_mako_return(self): diff --git a/tests/test_websitefiles.py b/tests/test_websitefiles.py index e4e0c5231f4..ab51f807227 100644 --- a/tests/test_websitefiles.py +++ b/tests/test_websitefiles.py @@ -12,6 +12,7 @@ from tests.base import OsfTestCase from osf_tests.factories import AuthUserFactory, ProjectFactory from website.files import exceptions +from website.files.utils import attach_versions from osf import models @@ -495,7 +496,7 @@ def test_get_version(self): ) file.save() - file.versions.add(*[v1, v2]) + attach_versions(file, [v1, v2]) assert_equals(file.get_version('1'), v1) assert_equals(file.get_version('2', required=True), v2) @@ -519,7 +520,7 @@ def test_update_version_metadata(self): file.save() - file.versions.add(v1) + file.add_version(v1) file.update_version_metadata(None, {'size': 1337}) with assert_raises(exceptions.VersionNotFoundError): diff --git a/website/files/utils.py b/website/files/utils.py index 022fcfc8706..cff82293458 100644 --- a/website/files/utils.py +++ b/website/files/utils.py @@ -6,6 +6,7 @@ def copy_files(src, target_node, parent=None, name=None): :param Folder parent: The parent of to attach the clone of src to, if applicable """ assert not parent or not parent.is_file, 'Parent must be a folder' + renaming = src.name != name cloned = src.clone() cloned.parent = parent @@ -14,20 +15,26 @@ def copy_files(src, target_node, parent=None, name=None): cloned.copied_from = src cloned.save() - if src.is_file and 
src.versions.exists(): fileversions = src.versions.select_related('region').order_by('-created') most_recent_fileversion = fileversions.first() if most_recent_fileversion.region and most_recent_fileversion.region != target_node.osfstorage_region: # add all original version except the most recent - cloned.versions.add(*fileversions[1:]) + attach_versions(cloned, fileversions[1:], src) # create a new most recent version and update the region before adding new_fileversion = most_recent_fileversion.clone() new_fileversion.region = target_node.osfstorage_region new_fileversion.save() - cloned.versions.add(new_fileversion) + attach_versions(cloned, [new_fileversion], src) else: - cloned.versions.add(*src.versions.all()) + attach_versions(cloned, src.versions.all(), src) + + if renaming: + latest_version = cloned.versions.first() + node_file_version = latest_version.get_basefilenode_version(cloned) + # If this is a copy and a rename, update the name on the through table + node_file_version.version_name = cloned.name + node_file_version.save() # copy over file metadata records if cloned.provider == 'osfstorage': @@ -40,3 +47,13 @@ def copy_files(src, target_node, parent=None, name=None): copy_files(child, target_node, parent=cloned) return cloned + +def attach_versions(file, versions_list, src=None): + """ + Loops through all versions in the versions list and attaches them to the file. + Fetches the name associated with the versions on the original src, if copying. 
+ """ + for version in versions_list: + original_version = version.get_basefilenode_version(src) + name = original_version.version_name if original_version else None + file.add_version(version, name) diff --git a/website/static/js/filepage/revisions.js b/website/static/js/filepage/revisions.js index 0e99d81f625..c938bb68bc0 100644 --- a/website/static/js/filepage/revisions.js +++ b/website/static/js/filepage/revisions.js @@ -218,7 +218,12 @@ var FileRevisionsTable = { } if (file.provider === 'osfstorage' && file.name && index !== 0) { - var parts = file.name.split('.'); + var parts; + if (file.versionNames && file.versionNames.length) { + parts = file.versionNames[index].split('.'); + } else { + parts = file.name.split('.'); + } if (parts.length === 1) { options.displayName = parts[0] + '-' + revision.modified; } else { diff --git a/website/templates/project/view_file.mako b/website/templates/project/view_file.mako index b2876bce2ce..98b9804d648 100644 --- a/website/templates/project/view_file.mako +++ b/website/templates/project/view_file.mako @@ -197,6 +197,7 @@ id: ${file_id | sjson, n}, checkoutUser: ${checkout_user if checkout_user else None | sjson, n}, isPreregCheckout: ${pre_reg_checkout if pre_reg_checkout else False | sjson, n}, + versionNames: ${version_names if version_names else [] | sjson, n}, urls: { %if error is None: render: ${ urls['render'] | sjson, n },