import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()

# Keep only the two non-zero classes (1 and 2) for a binary task
X_idx = np.where(iris.target > 0)
X = iris.data[X_idx]
Y = iris.target[X_idx]
2 Quantum Neural Networks
We already know that a QNN is built from blocks that encode our data (feature maps) and blocks that contain free parameters (the ansatz), and we know how those parameters can be trained. Let's work through a full example.
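Schematically, in the notation we will use below, the feature map \(U_{\theta_1}(x)\) and the ansatz \(V(\theta_2)\) act on the all-zeros state to prepare
\[ |\Psi(x, \theta)\rangle = V(\theta_2)\, U_{\theta_1}(x)\, |0\rangle^{\otimes n} \]
whose measurement statistics will define the model output.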
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
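The scaling step is not just cosmetic: the features will enter the circuit as rotation angles, and MinMaxScaler's default \([0, 1]\) output range keeps them in a sensible window. A quick sanity check:

# MinMaxScaler defaults to the output range [0, 1]
print(X_scaled.min(), X_scaled.max())  # -> 0.0 1.0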
from sklearn.model_selection import train_test_split

features = X_scaled.shape[1]
X_train, X_test, y_train, y_test = train_test_split(X_scaled, Y, test_size=0.33, random_state=42)
Done! Now we can start creating our QNN structure. First we need something to embed the data into a quantum state.
from squlearn.encoding_circuit import YZ_CX_EncodingCircuit

feature_map = YZ_CX_EncodingCircuit(num_qubits=features, num_features=features, num_layers=2)
feature_map.draw("mpl", style="clifford")
Now we will select a fully parameterized circuit (no data embedded).
from squlearn.encoding_circuit import LayeredEncodingCircuit
from squlearn.encoding_circuit.layered_encoding_circuit import Layer

# Template
ansatz = LayeredEncodingCircuit(num_qubits=features, num_features=features)

# Create the layer
layer = Layer(ansatz)
layer.Ry("p")              # Trainable rotation
layer.cx_entangling("NN")  # Entangling block (nearest neighbour)

ansatz.add_layer(layer, num_layers=1)
ansatz.draw("mpl", style="clifford")
Now we have composed the \(U_{\theta_1}(x)\) feature map and the \(V(\theta_2)\) parameterized circuit that together produce the state \(|\Psi(x, \theta)\rangle\). By measuring an observable \(O\) we can then evaluate the output of the QNN structure as
\[ f(x,\theta) = \langle \Psi(x, \theta) | O |\Psi(x, \theta)\rangle \]
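To make the formula concrete, here is a minimal NumPy sketch of this expectation value for a single qubit, assuming a toy \(R_y(x)\) encoding, an \(R_y(\theta)\) ansatz, and \(O = Z\) (illustrative choices, not the squlearn circuits built above):

import numpy as np

# Toy single-qubit version of f(x, theta) = <Psi(x, theta)| O |Psi(x, theta)>,
# with a hypothetical Ry(x) encoding, Ry(theta) ansatz and O = Z.
def ry(angle):
    return np.array([[np.cos(angle / 2), -np.sin(angle / 2)],
                     [np.sin(angle / 2),  np.cos(angle / 2)]])

Z = np.diag([1.0, -1.0])                        # observable O
x, theta = 0.3, 1.2                             # arbitrary data point and parameter
psi = ry(theta) @ ry(x) @ np.array([1.0, 0.0])  # |Psi> = V(theta) U(x) |0>
f = psi @ Z @ psi                               # real amplitudes, so no conjugation needed
print(f, np.cos(x + theta))                     # both print the same value

Back to the real circuits: composing the feature map and the ansatz gives us the full QNN structure.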
qnn_structure = feature_map + ansatz
qnn_structure.draw("mpl", style="clifford")
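As a quick optional check, squlearn encoding circuits report their number of trainable parameters through the num_parameters property:

# Trainable parameters carried by the composed feature map + ansatz
print(qnn_structure.num_parameters)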
A single \(Z\) observable on a specific qubit may do the trick. After all, its eigenvalues are \(1\) and \(-1\), so the expectation value lands in \([-1, 1]\) and suits a binary classification task.
from squlearn.observables import CustomObservable

observable = CustomObservable(num_qubits=features, operator_string="IIIZ", parameterized=True)
print(observable)
SparsePauliOp(['IIIZ'],
              coeffs=[ParameterExpression(1.0*p[0])])
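For intuition, the classifier can be viewed as thresholding this expectation value to pick a class. Here is a hypothetical sketch of such a decision rule (QNNClassifier handles the actual label mapping internally, and its internals may differ):

import numpy as np

# Hypothetical decision rule: map the expectation value <Z> in [-1, 1] to one
# of the two iris classes. QNNClassifier does this mapping internally; this
# sketch is only meant to build intuition.
def predict_from_expectation(expectations, labels=(1, 2)):
    expectations = np.asarray(expectations)
    return np.where(expectations >= 0.0, labels[0], labels[1])

print(predict_from_expectation([0.8, -0.3, 0.05]))  # -> [1 2 1]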
And with that, we just need to select our simulation backend and optimization routine to start training our model.
from squlearn.qnn import QNNClassifier, SquaredLoss
from squlearn.optimizers import Adam
from squlearn import Executor

qnn = QNNClassifier(qnn_structure, observable, Executor("statevector_simulator"), SquaredLoss(), Adam())
qnn.fit(X_train, y_train)
fit: 100%|██████████| 100/100 [17:03<00:00, 10.23s/it]
from sklearn.metrics import classification_report

y_pred = qnn.predict(X_test)
print(classification_report(y_test, y_pred))
              precision    recall  f1-score   support

           1       0.82      0.74      0.78        19
           2       0.69      0.79      0.73        14

    accuracy                           0.76        33
   macro avg       0.76      0.76      0.76        33
weighted avg       0.77      0.76      0.76        33
Cool! You can try other compositions, like the data re-uploading one (sketched below) or others used in benchmarks in the literature (Pérez-Salinas et al. 2020; Bowles, Ahmed, and Schuld 2024).
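As a rough illustration, a data re-uploading structure can be put together with the same LayeredEncodingCircuit API used earlier; the layer count here is arbitrary and the sketch is untested:

# Data re-uploading sketch (Pérez-Salinas et al. 2020): every repetition
# interleaves a data-encoding rotation with a trainable one.
reupload = LayeredEncodingCircuit(num_qubits=features, num_features=features)
reupload_layer = Layer(reupload)
reupload_layer.Ry("x")               # re-encode the data in each layer
reupload_layer.Ry("p")               # trainable rotation
reupload_layer.cx_entangling("NN")   # nearest-neighbour entanglement
reupload.add_layer(reupload_layer, num_layers=3)

Below, we instead benchmark the HubregtsenEncodingCircuit that ships with squlearn.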
from squlearn.encoding_circuit import HubregtsenEncodingCircuit

full_pqc = HubregtsenEncodingCircuit(num_qubits=features, num_features=features, num_layers=4)
full_pqc.draw("mpl", style="clifford")
qnn = QNNClassifier(full_pqc, observable, Executor("statevector_simulator"), SquaredLoss(), Adam())
qnn.fit(X_train, y_train)

y_pred = qnn.predict(X_test)
print(classification_report(y_test, y_pred))
fit: 100%|██████████| 100/100 [04:37<00:00, 2.77s/it]
              precision    recall  f1-score   support

           1       0.94      0.84      0.89        19
           2       0.81      0.93      0.87        14

    accuracy                           0.88        33
   macro avg       0.88      0.89      0.88        33
weighted avg       0.89      0.88      0.88        33