Introduced tf-keras for compatibility with tensorflow>2.15 #239

Merged · 2 commits · Jul 26, 2024
20 changes: 10 additions & 10 deletions mhcflurry/class1_neural_network.py
@@ -217,7 +217,7 @@ def borrow_cached_network(klass, network_json, network_weights):
if key not in klass.KERAS_MODELS_CACHE:
# Cache miss.
configure_tensorflow()
-from tensorflow.keras.models import model_from_json
+from tf_keras.models import model_from_json

network = model_from_json(network_json)
existing_weights = None
@@ -258,7 +258,7 @@ def network(self, borrow=False):
)
else:
configure_tensorflow()
-from tensorflow import keras
+import tf_keras as keras

# Hack to fix an issue caused by a change introduced in
# tensorflow 2.3.0, in which our models fit using tensorflow 2.2
@@ -568,7 +568,7 @@ def fit_generator(
progress_print_interval : float
"""
configure_tensorflow()
-from tensorflow.keras import backend as K
+from tf_keras import backend as K

fit_info = collections.defaultdict(list)

@@ -775,7 +775,7 @@ def fit(
disable.
"""
configure_tensorflow()
-from tensorflow.keras import backend as K
+from tf_keras import backend as K

encodable_peptides = EncodableSequences.create(peptides)
peptide_encoding = self.peptides_to_network_input(encodable_peptides)
@@ -1192,9 +1192,9 @@ def merge(cls, models, merge_method="average"):

"""
configure_tensorflow()
-from tensorflow.keras import backend as K
-from tensorflow.keras.layers import Input, average, add, concatenate
-from tensorflow.keras.models import Model
+from tf_keras import backend as K
+from tf_keras.layers import Input, add, average, concatenate
+from tf_keras.models import Model

if len(models) == 1:
return models[0]
@@ -1337,8 +1337,8 @@ def make_network(
# We import keras here to avoid tensorflow debug output, etc. unless we
# are actually about to use Keras.
configure_tensorflow()
-from tensorflow import keras
-from tensorflow.keras.layers import (
+import tf_keras as keras
+from tf_keras.layers import (
Input,
Dense,
Flatten,
@@ -1500,7 +1500,7 @@ def set_allele_representations(self, allele_representations, force_surgery=False
m is the length of the vectors used to represent amino acids
"""
configure_tensorflow()
-from tensorflow.keras.models import clone_model
+from tf_keras.models import clone_model

reshaped = allele_representations.reshape(
(
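Every hunk in this file follows the same pattern: call configure_tensorflow() first, then import Keras lazily from tf_keras inside the function that needs it. Below is a minimal sketch of that pattern; the build_model helper and toy model are hypothetical, not mhcflurry's actual architecture, and it assumes tensorflow plus tf-keras are installed.

```python
# Hypothetical sketch of the deferred-import pattern used throughout this file.
# In mhcflurry, configure_tensorflow() runs first (it now sets TF_USE_LEGACY_KERAS,
# see the common.py hunk below); Keras is imported only inside functions that use it,
# which keeps TensorFlow's startup cost and log noise out of unrelated code paths.
def build_model(input_dim):
    import tf_keras as keras
    from tf_keras.layers import Dense, Input

    inputs = Input(shape=(input_dim,))
    hidden = Dense(16, activation="relu")(inputs)
    outputs = Dense(1, activation="sigmoid")(hidden)
    return keras.Model(inputs=inputs, outputs=outputs)

model = build_model(8)
model.compile(optimizer="adam", loss="binary_crossentropy")
```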
9 changes: 4 additions & 5 deletions mhcflurry/class1_processing_neural_network.py
@@ -111,7 +111,7 @@ def network(self):
if self._network is None and self.network_json is not None:
# NOTE
# Instead of calling:
-# from tensorflow.keras.models import model_from_json
+# from tf_keras.models import model_from_json
# self._network = model_from_json(self.network_json)
# We are re-creating the network here using the hyperparameters.
# This is because the network uses Lambda layers, which break
@@ -399,17 +399,16 @@ def make_network(
# We import keras here to avoid tensorflow debug output, etc. unless we
# are actually about to use Keras.
configure_tensorflow()
-from tensorflow import keras
-from keras.layers import (
+from tf_keras.layers import (
Input,
Dense,
Dropout,
Concatenate,
Conv1D,
Lambda,
)
-from keras.models import Model
-from keras import regularizers, initializers
+from tf_keras.models import Model
+from tf_keras import regularizers, initializers

model_inputs = {}

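The NOTE in the first hunk explains why this class rebuilds its network from hyperparameters instead of calling model_from_json: Lambda layers serialize a reference to a Python callable and do not round-trip reliably across versions. A hedged illustration of that rebuild-and-reload approach, using only hypothetical names:

```python
# Illustrative only (hypothetical builder, not mhcflurry's): persist hyperparameters
# and weights, then restore by re-running the builder and loading the weights back,
# avoiding JSON (de)serialization of Lambda layers entirely.
import tf_keras as keras
from tf_keras.layers import Dense, Input, Lambda

def make_network(hidden_units=16):
    inputs = Input(shape=(4,))
    scaled = Lambda(lambda t: t * 2.0)(inputs)   # Lambda layers are fragile to serialize
    hidden = Dense(hidden_units, activation="relu")(scaled)
    return keras.Model(inputs, Dense(1)(hidden))

original = make_network(hidden_units=16)
weights = original.get_weights()

restored = make_network(hidden_units=16)         # same hyperparameters
restored.set_weights(weights)                    # weights transfer layer by layer
```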
7 changes: 6 additions & 1 deletion mhcflurry/common.py
@@ -94,7 +94,12 @@ def configure_tensorflow(backend=None, gpu_device_nums=None, num_threads=None):

"""
import tensorflow as tf


+# mhcflurry models use keras 2. TensorFlow now defaults to keras 3, so to load these
+# old models, we need to set the environment variable to use legacy keras. Ideally,
+# these models should be regenerated with keras 3.
+os.environ["TF_USE_LEGACY_KERAS"] = "1"

global TENSORFLOW_CONFIGURED

if TENSORFLOW_CONFIGURED:
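The TF_USE_LEGACY_KERAS flag is what lets TensorFlow 2.16+, where tf.keras defaults to Keras 3, fall back to the legacy Keras 2 implementation shipped as tf-keras. It must be set before Keras is first resolved, which is why configure_tensorflow() is called ahead of every deferred import in this PR. A minimal sketch of loading an older Keras 2 model under that flag, with hypothetical file names:

```python
# Minimal sketch, assuming tensorflow>=2.15 and tf-keras are installed.
# The flag must be set before Keras is imported; the file names are hypothetical.
import os
os.environ["TF_USE_LEGACY_KERAS"] = "1"   # route tf.keras to the legacy Keras 2 stack

from tf_keras.models import model_from_json

with open("old_keras2_model.json") as f:
    network = model_from_json(f.read())
network.load_weights("old_keras2_model_weights.h5")
```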
4 changes: 2 additions & 2 deletions mhcflurry/custom_loss.py
@@ -64,7 +64,7 @@ def loss(self, y_true, y_pred):

def get_keras_loss(self, reduction="sum_over_batch_size"):
configure_tensorflow()
-from tensorflow.keras.losses import LossFunctionWrapper
+from tf_keras.losses import LossFunctionWrapper
return LossFunctionWrapper(
self.loss, reduction=reduction, name=self.name)

@@ -169,7 +169,7 @@ def loss(self, y_true, y_pred):
configure_tensorflow()
import tensorflow as tf

-# from tensorflow.keras import backend as K
+# from tf_keras import backend as K
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1])

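The LossFunctionWrapper import simply moves to tf_keras here. For context, a roughly equivalent custom loss can also be expressed through the public tf_keras Loss API; the sketch below mirrors the flatten-then-compare shape handling of the loss above and is illustrative, not the PR's code.

```python
# Illustrative custom loss using the public tf_keras API (not mhcflurry's code).
import tensorflow as tf
import tf_keras as keras

class FlattenedMSE(keras.losses.Loss):
    """Flatten both tensors before comparing them, as the loss above does."""
    def call(self, y_true, y_pred):
        y_true = tf.reshape(y_true, [-1])
        y_pred = tf.reshape(y_pred, [-1])
        return tf.reduce_mean(tf.square(y_true - y_pred))

loss_fn = FlattenedMSE(name="flattened_mse")  # usable directly in model.compile(loss=loss_fn)
```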
4 changes: 2 additions & 2 deletions mhcflurry/data_dependent_weights_initialization.py
@@ -59,7 +59,7 @@ def svd_orthonormal(shape):

def get_activations(model, layer, X_batch):
configure_tensorflow()
-from tensorflow.keras.models import Model
+from tf_keras.models import Model
intermediate_layer_model = Model(
inputs=model.input,
outputs=layer.get_output_at(0)
@@ -93,7 +93,7 @@ def lsuv_init(model, batch, verbose=True, margin=0.1, max_iter=100):
Same as what was passed in.
"""
configure_tensorflow()
-from tensorflow.keras.layers import Dense, Convolution2D
+from tf_keras.layers import Dense, Convolution2D
needed_variance = 1.0
layers_inintialized = 0
for layer in model.layers:
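get_activations builds a second Model that exposes a single layer's output, a standard Keras probing pattern that carries over unchanged to tf_keras. A self-contained sketch of the same idea (the toy model below is hypothetical):

```python
# Hypothetical toy model; the probing pattern mirrors get_activations above.
import numpy as np
import tf_keras as keras

inputs = keras.Input(shape=(4,))
hidden = keras.layers.Dense(8, activation="relu", name="hidden")(inputs)
outputs = keras.layers.Dense(1)(hidden)
model = keras.Model(inputs, outputs)

# Expose the hidden layer's output and run a batch through it.
probe = keras.Model(inputs=model.input, outputs=model.get_layer("hidden").output)
activations = probe.predict(np.random.rand(16, 4), verbose=0)
print(activations.shape)  # (16, 8)
```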
3 changes: 2 additions & 1 deletion requirements.txt
@@ -1,6 +1,7 @@
six
pandas>=0.20.3
-tensorflow>=2.12.0,<2.16.0
+tensorflow>=2.15.0,<2.17.0
+tf-keras
appdirs
scikit-learn
mhcgnomes
3 changes: 2 additions & 1 deletion setup.py
@@ -41,7 +41,8 @@
"mhcgnomes>=0.8.4",
"pyyaml",
"tqdm",
"tensorflow>=2.12.0,<2.16.0",
"tensorflow>=2.15.0,<2.17.0",
"tf-keras"
]

setup(
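With the tightened pins, a quick post-install check confirms the environment resolved as intended. This is a hypothetical sanity check, not part of the PR:

```python
# Hypothetical post-install check: confirm the pins from requirements.txt / setup.py
# are satisfied in the active environment (pip install "tensorflow>=2.15,<2.17" tf-keras).
import tensorflow as tf
import tf_keras  # provided by the new "tf-keras" requirement

major, minor = (int(x) for x in tf.__version__.split(".")[:2])
assert (2, 15) <= (major, minor) < (2, 17), f"unexpected tensorflow {tf.__version__}"
print("tensorflow", tf.__version__, "| tf-keras", tf_keras.__version__)
```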