Initial commit

Paul Hitt 2025-04-21 16:36:49 -04:00
commit ae9e37751b
5 changed files with 148 additions and 0 deletions

README.md Normal file (+1)

@@ -0,0 +1 @@
# astrostuff

astronn_test.py Normal file (+68)

@@ -0,0 +1,68 @@
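# Train astroNN's Galaxy10CNN on the Galaxy10 dataset and plot a confusion matrix of its predictions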
# import everything we need first
from tensorflow.keras import utils
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from astroNN.models import Galaxy10CNN
from astroNN.datasets import galaxy10
from astroNN.datasets.galaxy10 import galaxy10cls_lookup, galaxy10_confusion
# To load images and labels (will download automatically at the first time)
# First time downloading location will be ~/.astroNN/datasets/
images, labels = galaxy10.load_data()
# To convert the labels to categorical 10 classes
labels = utils.to_categorical(labels, 10)
# Select 10 random images to inspect
img = None
plt.ion()
print('===================Data Inspection===================')
for counter, i in enumerate(np.random.randint(0, labels.shape[0], size=10)):
    img = plt.imshow(images[i])
    plt.title('Class {}: {} \n Random Demo images {} of 10'.format(np.argmax(labels[i]), galaxy10cls_lookup(np.argmax(labels[i])), counter+1))
    plt.draw()
    plt.pause(2.)
plt.close('all')
print('===============Data Inspection Finished===============')
# Convert images and labels to float32
labels = labels.astype(np.float32)
images = images.astype(np.float32)
# Split the dataset into training set and testing set
train_idx, test_idx = train_test_split(np.arange(labels.shape[0]), test_size=0.1)
train_images, train_labels, test_images, test_labels = images[train_idx], labels[train_idx], images[test_idx], labels[test_idx]
# To create a neural network instance
galaxy10net = Galaxy10CNN()
# set the maximum number of epochs; 5 is enough for a quick first result
galaxy10net.max_epochs = 5
# To train the neural net
# astroNN will normalize the data by default
galaxy10net.train(train_images, train_labels)
# print the model summary (the underlying Keras model is only built once train() runs)
galaxy10net.keras_model.summary()
# After the training, you can test the neural net performance
# Note: predicted_labels are the network's predictions; test_labels are the ground truth from the dataset
predicted_labels = galaxy10net.test(test_images)
# Convert predicted_labels to class
prediction_class = np.argmax(predicted_labels, axis=1)
# Convert test_labels to class
test_class = np.argmax(test_labels, axis=1)
# Prepare an empty 10x10 confusion matrix
confusion_matrix = np.zeros((10, 10))
# Fill the confusion matrix: rows are predicted classes, columns are true classes
for counter, i in enumerate(prediction_class):
    confusion_matrix[i, test_class[counter]] += 1
# Plot the confusion matrix
galaxy10_confusion(confusion_matrix)

requirements.txt Normal file (+5)

@@ -0,0 +1,5 @@
matplotlib
astroNN
tensorflow-macos
tensorflow-metal
tensorflow_datasets

setup_environment.sh Executable file (+13)

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Install Xcode command line tools (uncomment if not already installed)
#xcode-select --install
# Download and install miniforge
wget https://github.com/conda-forge/miniforge/releases/download/4.13.0-1/Miniforge3-MacOSX-arm64.sh
chmod +x Miniforge3-MacOSX-arm64.sh
./Miniforge3-MacOSX-arm64.sh -u
# Install packages with conda (tensorflow-deps comes from the apple channel)
conda config --set auto_activate_base true
conda install -c apple -c conda-forge tensorflow-deps pandas jupyter scikit-learn -y
pip3 install -r requirements.txt

tensorflow_m1_test.py Normal file (+61)

@@ -0,0 +1,61 @@
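# Quick MNIST CNN run to check that the TensorFlow install (tensorflow-macos/tensorflow-metal) works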
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
tf.enable_v2_behavior()
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
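# Load the MNIST train/test splits as (image, label) pairs, plus dataset metadata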
(ds_train, ds_test), ds_info = tfds.load(
    'mnist',
    split=['train', 'test'],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32`."""
    return tf.cast(image, tf.float32) / 255., label
batch_size = 128
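# Training pipeline: normalize, cache, shuffle the full train split, batch, and prefetch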
ds_train = ds_train.map(
    normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(batch_size)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
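# Test pipeline: no shuffling; cache after batching since it is read straight through each epoch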
ds_test = ds_test.map(
    normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(batch_size)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
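# A small CNN: two convolutional layers, max pooling, then a dense softmax classifier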
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, kernel_size=(3, 3),
                           activation='relu'),
    tf.keras.layers.Conv2D(64, kernel_size=(3, 3),
                           activation='relu'),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    # tf.keras.layers.Dropout(0.25),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    # tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='softmax')
])
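# Integer labels (from as_supervised=True) call for sparse categorical cross-entropy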
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=tf.keras.optimizers.Adam(0.001),
    metrics=['accuracy'],
)
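# Train for 12 epochs, evaluating on the test set after each epoch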
model.fit(
    ds_train,
    epochs=12,
    validation_data=ds_test,
)