[ python 2.7 ] Neural Net ?

Status
Niet open voor verdere reacties.

blua tigro

Gebruiker
Lid geworden
21 apr 2009
Berichten
48
ik poog n NN te schijven in python 2.7

ik krijg n vreemde foutmelding met dit
help aub
Code:
# bluatigro 2 dec 2017
# ann lib

import random
import math

class Neuron() :

    def __init__( self , inputmax ) :
        self.output = 0.0
        self.g = 0.0
        self.wg = 0.0
        self.dv = list()
        self.w = list()
        self.inputmax = no 
        for i in xrange( no ) :
            self.w.append( random.random() )
            self.dv.append( random.random() )

class Layer() :

    def __init__( self , inputmax , neuronmax ) :
        self.inputmax = inputmax
        self.ns = list()
        self.nmax = neuronmax
        self.linput = list()
        for i in xrange( neuronmax ) :
            self.ns.append( Neuron( inputmax ) )
        for i in xrange( inputmax ) :
            self.linput.append( float() )

    def calculate( self ) :
        for i in xrange( len( self.ns ) ) :
            adder = 0.0
            for j in xrange( len( self.linput ) ) :
                adder += ( self.ns[ i ].weight[ j ]
                * self.linput[ j ] )
                adder += ( self.ns[ i ].wg
                * self.ns[ i ] )
            self.ns[ i ].output = (
            1.0 / ( 1.0 + math.exp( -adder / self.nmax ) ) )
                
class NeuralNet() :

    def __init__( self , size , layers ) :
        self.size = size
        self.lmax = layers
        self.ls = list()
        for i in xrange( layers ) :
            self.ls.append( Layer( size , size ) )

    def propegate( self , inlst ) :
        for i in xrange( len( inlst ) ) :
            self.ls[ 0 ].n[ i ].output = inlist[ i ]
        self.ls[ 0 ].calculate()
        for i in xrange( 1 , len( self.lmax ) ) :
            for j in xrange( self.size ) :
                uit = self.ls[ i ].ns[ j ].output
                self.ls[ i ].linput[ j ] = uit
            self.ls[ i ].calculate()
                
    def train( self , whised , inlst , a , m ) :
        self.propegate( inlst )
        for i in xrange( len( wished ) ) :
            uitput = self.ls[ self.layermax ].ns[ i ].output
            errorg += ( wished[ i ] - uitput ) ** 2
        for i in xrange( self.lmax , 0 , -1 ) :
            for j in xrange( self.size ) :
                uitput = self.ls[ i ].ns[ j ].output
                errorc = uitput * ( 1 - uitput ) * adder
                for k in xrange( self.size ) :
                    delta = self.ls[i].ns[j].dv[k]
* * * *             u = a * errorc * self.ls[i].linput[k]
* * * *             udelta = u + delta * m
                    self.ls[i].ns[j].w[k] += udelta
* * * *             self.ls[i].ns[j].dv[k] = udelta
* * * *             csum += self.ls[i].ns[j].w[k] * errorc
                    g = self.ls[i].ns[j].g
                    self.ls[i].ns[j].wg += a * errorc * g  
* *         adder = csum
* *         csum = 0

        return errorg / 2

ANN = NeuralNet( 3 , 3 )

wish = list()

def xor( a , b ) :
    return ( a and not b ) or ( not a and b )

for a in range( 1 ) :
    for b in range( 1 ) :
        for c in range( 1 ) :
            wish.append( float( xor( a , xor( b , c ) ) ) 

for epog in xrange( 1000 ) :
    invoer = [ 0.0 , 0.0 , 0.0 ]
    fout = 0.0
    for a in range( 1 ) :
        invoer[ 0 ] = float( a )
        for b in range( 1 ) :
            invoer[ 1 ] = float( b )
            for c in range( 1 ) :
                invoer[ 2 ] = float( c )
                fout += ANN.train( wish , invoer , .1 , .1 )
    print fout
 
-

het was n syntacs error

die heb ik verwijderd

error :
ik zie aleen 0.0 als fout
Code:
# bluatigro 2 dec 2017
# ann lib

import random
import math

class Neuron() :

    def __init__( self , no ) :
        self.output = 0.0
        self.g = 0.0
        self.wg = 0.0
        self.dv = list()
        self.w = list()
        self.inputmax = no
        for i in xrange( no ) :
            self.w.append( random.random() )
            self.dv.append( random.random() )

class Layer() :

    def __init__( self , inputmax , neuronmax ) :
        self.inputmax = inputmax
        self.ns = list()
        self.nmax = neuronmax
        self.linput = list()
        for i in xrange( neuronmax ) :
            self.ns.append( Neuron( inputmax ) )
        for i in xrange( inputmax ) :
            self.linput.append( float() )

    def calculate( self ) :
        for i in xrange( len( self.ns ) ) :
            adder = 0.0
            for j in xrange( len( self.linput ) ) :
                adder += ( self.ns[ i ].w[ j ]
                * self.linput[ j ] )
                adder += ( self.ns[ i ].wg
                * self.ns[ i ].g )
            self.ns[ i ].output = (
            1.0 / ( 1.0 + math.exp( -adder / self.nmax ) ) )
                
class NeuralNet() :

    def __init__( self , size , layers ) :
        self.size = size
        self.lmax = layers
        self.ls = list()
        for i in xrange( layers + 1 ) :
            self.ls.append( Layer( size , size ) )

    def propegate( self , inlst ) :
        for i in xrange( len( inlst ) ) :
            self.ls[ 0 ].ns[ i ].output = inlst[ i ]
        self.ls[ 0 ].calculate()
        for i in xrange( 1 , self.lmax ) :
            for j in xrange( self.size ) :
                uit = self.ls[ i ].ns[ j ].output
                self.ls[ i ].linput[ j ] = uit
            self.ls[ i ].calculate()
                
    def train( self , wished , inlst , a , m ) :
        self.propegate( inlst )
        errorg = 0.0
        adder = 0.0
        csum = 0.0
        for i in xrange( len( wished ) ) :
            uitput = self.ls[ self.lmax ].ns[ i ].output
            errorg += ( wished[ i ] - uitput ) ** 2
        for i in xrange( self.lmax , 0 , -1 ) :
            for j in xrange( self.size ) :
                uitput = self.ls[ i ].ns[ j ].output
                errorc = uitput * ( 1 - uitput ) * adder
                for k in xrange( self.size ) :
                    delta = self.ls[i].ns[j].dv[k]
                    u = a * errorc * self.ls[i].linput[k]
                    udelta = u + delta * m
                    self.ls[i].ns[j].w[k] += udelta
                    self.ls[i].ns[j].dv[k] = udelta
                    csum += self.ls[i].ns[j].w[k] * errorc
                g = self.ls[i].ns[j].g
                self.ls[i].ns[j].wg += a * errorc * g  
            adder = csum
            csum = 0

        return errorg / 2

ANN = NeuralNet( 3 , 3 )

wish = list()

def xor( a , b ) :
    return ( a and not b ) or ( not a and b )

wish.append( float() )
            
for epog in xrange( 1000 ):
    invoer = [ 0.0 , 0.0 , 0.0 ]
    fout = 0.0
    for a in range( 1 ) :
        invoer[ 0 ] = float( a )
        for b in range( 1 ) :
            invoer[ 1 ] = float( b )
            for c in range( 1 ) :
                invoer[ 2 ] = float( c )
                wish[ 0 ] = float( xor( a , xor( b , c ) ) )
                fout += ANN.train( wish , invoer , .1 , .1 )
    print epog , "   " , fout

print "[ game over ]"
 
Je verwacht nog steeds dat ik wel even je code ga draaien en begrijp wat je bedoelt.
 
Status
Niet open voor verdere reacties.
Steun Ons

Nieuwste berichten

Terug
Bovenaan Onderaan