Exploring Hidden Semantics in Neural Networks with Symbolic Regression
Created by W.Langdon from
gp-bibliography.bib Revision:1.8051
@InProceedings{luo:2022:GECCO,
  author =       "Yuanzhen Luo and Qiang Lu and Xilei Hu and
                 Jake Luo and Zhiguang Wang",
  title =        "Exploring Hidden Semantics in Neural Networks with
                 Symbolic Regression",
  booktitle =    "Proceedings of the 2022 Genetic and Evolutionary
                 Computation Conference",
  year =         "2022",
  editor =       "Alma Rahat and Jonathan Fieldsend and
                 Markus Wagner and Sara Tari and Nelishia Pillay and Irene Moser and
                 Aldeida Aleti and Ales Zamuda and Ahmed Kheiri and
                 Erik Hemberg and Christopher Cleghorn and Chao-li Sun and
                 Georgios Yannakakis and Nicolas Bredeche and
                 Gabriela Ochoa and Bilel Derbel and Gisele L. Pappa and
                 Sebastian Risi and Laetitia Jourdan and
                 Hiroyuki Sato and Petr Posik and Ofer Shir and Renato Tinos and
                 John Woodward and Malcolm Heywood and Elizabeth Wanner and
                 Leonardo Trujillo and Domagoj Jakobovic and
                 Risto Miikkulainen and Bing Xue and Aneta Neumann and
                 Richard Allmendinger and Inmaculada Medina-Bulo and
                 Slim Bechikh and Andrew M. Sutton and
                 Pietro Simone Oliveto",
  pages =        "982--990",
  address =      "Boston, USA",
  series =       "GECCO '22",
  month =        "9-13 " # jul,
  organization = "SIGEVO",
  publisher =    "Association for Computing Machinery",
  publisher_address = "New York, NY, USA",
  keywords =     "genetic algorithms, genetic programming, Cartesian
                 Genetic Programming, neural network, ANN, symbolic
                 regression, Lime, Maple, SRNet, USDB",
  isbn13 =       "978-1-4503-9237-2",
  URL =          "https://arxiv.org/abs/2204.10529",
  DOI =          "10.1145/3512290.3528758",
  size =         "9 pages",
  abstract =     "Many recent studies focus on developing mechanisms to
                 explain the black-box behaviors of neural networks
                 (NNs). However, little work has been done to extract
                 the potential hidden semantics (mathematical
                 representation) of a neural network. A succinct and
                 explicit mathematical representation of a ANN model
                 could improve the understanding and interpretation of
                 its behaviors. To address this need, we propose a novel
                 symbolic regression method for neural works (called
                 SRNet) to discover the mathematical expressions of a
                 NN. SRNet creates a Cartesian genetic programming
                 (NNCGP) to represent the hidden semantics of a single
                 layer in a NN. It then leverages a multi-chromosome
                 NNCGP to represent hidden semantics of all layers of
                 the NN. The method uses a ($1+\lambda$) evolutionary strategy
                 (called MNNCGP-ES) to extract the final mathematical
                 expressions of all layers in the NN. Experiments on 12
                 symbolic regression benchmarks and 5 classification
                 benchmarks show that SRNet not only can reveal the
                 complex relationships between each layer of a NN but
                 also can extract the mathematical representation of the
                 whole NN. Compared with LIME and MAPLE, SRNet has
                 higher interpolation accuracy and trends to approximate
                 the real model on the practical dataset.",
  notes =        "GECCO-2022 A Recombination of the 31st International
                 Conference on Genetic Algorithms (ICGA) and the 27th
                 Annual Genetic Programming Conference (GP)",
}
Genetic Programming entries for
Yuanzhen Luo
Qiang Lu
Xilei Hu
Jake Luo
Zhiguang Wang
Citations