Appendix B. The code of the Gornet fatigue model (Python, TensorFlow, Keras)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

tf.random.set_seed(42)

# === Experimental data, quasi-isotropic composite ===
logN_data = np.array([0.0000000001, 3.82979, 4.76596, 5.00304, 5.30699, 5.38602,
                      5.53799, 5.59878, 5.90881, 6.26748, 6.44985, 6.70517],
                     dtype=np.float32).reshape(-1, 1)
N = 10 ** logN_data
S = np.array([699, 460.26, 460.26, 464.85, 441.89, 418.94, 439.6, 451.0,
              430.42, 401.72, 418.94, 370.73], dtype=np.float32).reshape(-1, 1)

# Work in log-log coordinates; normalise log10(N) to [0, 1] for the network input
logN = np.log10(N + 1e-6).reshape(-1, 1)
logS = np.log10(S + 1e-6).reshape(-1, 1)
logN_min, logN_max = 0.0, 9.0
logN_norm = (logN - logN_min) / (logN_max - logN_min)

# === Neural network ===
class PINN(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.hidden1 = tf.keras.layers.Dense(20, activation='tanh')
        self.hidden2 = tf.keras.layers.Dense(10, activation='tanh')
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, x):
        x = self.hidden1(x)
        x = self.hidden2(x)
        return self.output_layer(x)

model = PINN()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
loss_history = []

# === Constraints at two points ===
logN_start = tf.constant([[0.]], dtype=tf.float32)
logN_end = tf.constant([[8.5]], dtype=tf.float32)
logN_start_norm = (logN_start - logN_min) / (logN_max - logN_min)
logN_end_norm = (logN_end - logN_min) / (logN_max - logN_min)

# === Fatigue limit ===
S_lim = tf.constant([[370.0]], dtype=tf.float32)
logS_lim = tf.math.log(S_lim + 1e-6) / tf.math.log(tf.constant(10.0, dtype=tf.float32))

# === Training ===
for epoch in range(6000):
    with tf.GradientTape() as tape:
        logS_pred = model(logN_norm, training=True)

        # Initial tangent: slope of the network output at the first constraint point
        with tf.GradientTape() as tape_start:
            tape_start.watch(logN_start_norm)
            S_start = model(logN_start_norm)
        dS_dN_start = tape_start.gradient(S_start, logN_start_norm)

        # Final value
        S_end = model(logN_end_norm)

        # Final tangent: slope at the second constraint point
        with tf.GradientTape() as tape_end:
            tape_end.watch(logN_end_norm)
            S_end_tangent = model(logN_end_norm)
        dS_dN_end = tape_end.gradient(S_end_tangent, logN_end_norm)

        # Cost function
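        # --- NOTE: the published listing is truncated at the cost function. ---
        # --- What follows is a minimal sketch of how the remaining loop body ---
        # --- might look, not the authors' exact formulation: it assumes the ---
        # --- cost combines the data misfit with penalties pinning the end ---
        # --- value to the fatigue limit and flattening both tangents; the ---
        # --- penalty weights (10.0, 1.0) are hypothetical placeholders. ---
        loss_data = tf.reduce_mean(tf.square(logS_pred - logS))    # data misfit
        loss_end = tf.reduce_mean(tf.square(S_end - logS_lim))     # curve meets the fatigue limit
        loss_tan_end = tf.reduce_mean(tf.square(dS_dN_end))        # horizontal asymptote at large N
        loss_tan_start = tf.reduce_mean(tf.square(dS_dN_start))    # flat start of the S-N curve
        loss = loss_data + 10.0 * loss_end + 1.0 * (loss_tan_end + loss_tan_start)

    # Gradient step on the network weights
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    loss_history.append(loss.numpy())

# --- Hypothetical diagnostic plot (matplotlib is imported but not used in the ---
# --- published excerpt): predicted S-N curve against the experimental points. ---
logN_plot = np.linspace(logN_min, logN_max, 200, dtype=np.float32).reshape(-1, 1)
logS_plot = model((logN_plot - logN_min) / (logN_max - logN_min)).numpy()
plt.plot(logN_plot, 10 ** logS_plot, label='PINN prediction')
plt.scatter(logN_data, S, color='red', label='Experimental data')
plt.xlabel('log10(N)')
plt.ylabel('S')
plt.legend()
plt.show()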