more protocol/service refactor

- removed protocol.Event: CNPeers now send protocol.PacketEvents
- peer uData is now held in CNPeer; use SetUserData() and UserData() to
  set/read it
- Service.PacketHandler callback signature changed: the uData argument is
  gone, and calls were switched to peer.SetUserData() and peer.UserData()
  where appropriate
- service.Service: lots of tidying up, removed the dependence on the old
  protocol.Event
- service.Service and protocol.CNPeer now accept a cancelable context (see
  the usage sketch below). hooray, graceful shutdowns and unit tests!
- general cleanup
CPunch 2023-12-01 00:56:34 -06:00
parent c0ba365cf5
commit f4b17906ce
12 changed files with 292 additions and 261 deletions
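
For orientation, here is a minimal sketch of the new lifecycle wired together (illustration only, not code from this commit; the service name and the 0x1234 handler ID are placeholders, everything else is the API introduced in the diffs below):

    package main

    import (
        "context"

        "github.com/CPunch/gopenfusion/internal/protocol"
        "github.com/CPunch/gopenfusion/internal/service"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())

        port, _ := service.RandomPort()
        srvc := service.NewService(ctx, "EXAMPLE", port)

        // handlers no longer receive uData; per-peer state lives on the peer itself
        srvc.AddPacketHandler(0x1234, func(peer *protocol.CNPeer, pkt protocol.Packet) error {
            peer.SetUserData("per-peer state goes here")
            return nil
        })

        go srvc.Start()
        <-srvc.Started()

        // ... later: cancel the context to request a graceful shutdown
        cancel()
        <-srvc.Stopped()
    }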


@@ -31,7 +31,7 @@ func (s *loginCommand) SetFlags(f *flag.FlagSet) {
 }
 
 func (s *loginCommand) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
-    loginServer, err := login.NewLoginServer(dbHndlr, redisHndlr, s.port)
+    loginServer, err := login.NewLoginServer(ctx, dbHndlr, redisHndlr, s.port)
     if err != nil {
         log.Panicf("failed to create shard server: %v", err)
     }


@@ -31,7 +31,7 @@ func (s *shardCommand) SetFlags(f *flag.FlagSet) {
 }
 
 func (s *shardCommand) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
-    shardServer, err := shard.NewShardServer(dbHndlr, redisHndlr, s.port)
+    shardServer, err := shard.NewShardServer(ctx, dbHndlr, redisHndlr, s.port)
     if err != nil {
         log.Panicf("failed to create shard server: %v", err)
     }


@@ -1,10 +1,11 @@
 package protocol
 
 import (
+    "bytes"
+    "context"
     "encoding/binary"
     "fmt"
     "io"
-    "log"
     "net"
     "sync/atomic"
     "time"
@@ -15,9 +16,17 @@ const (
     USE_FE
 )
 
+type PacketEvent struct {
+    Type  int
+    Pkt   *bytes.Buffer
+    PktID uint32
+}
+
 // CNPeer is a simple wrapper for net.Conn connections to send/recv packets over the Fusionfall packet protocol.
 type CNPeer struct {
+    uData interface{}
     conn  net.Conn
+    ctx   context.Context
 
     whichKey int
     alive    *atomic.Bool
@@ -32,9 +41,10 @@ func GetTime() uint64 {
     return uint64(time.Now().UnixMilli())
 }
 
-func NewCNPeer(conn net.Conn) *CNPeer {
+func NewCNPeer(ctx context.Context, conn net.Conn) *CNPeer {
     p := &CNPeer{
         conn: conn,
+        ctx:  ctx,
 
         whichKey: USE_E,
         alive:    &atomic.Bool{},
@@ -45,6 +55,14 @@ func NewCNPeer(conn net.Conn) *CNPeer {
     return p
 }
 
+func (peer *CNPeer) SetUserData(uData interface{}) {
+    peer.uData = uData
+}
+
+func (peer *CNPeer) UserData() interface{} {
+    return peer.uData
+}
+
 func (peer *CNPeer) Send(typeID uint32, data ...interface{}) error {
     // grab buffer from pool
     buf := GetBuffer()
@@ -82,7 +100,7 @@ func (peer *CNPeer) Send(typeID uint32, data ...interface{}) error {
     EncryptData(buf.Bytes()[4:], key)
 
     // send full packet
-    log.Printf("Sending %#v, sizeof: %d, buffer: %v", data, buf.Len(), buf.Bytes())
+    // log.Printf("Sending %#v, sizeof: %d, buffer: %v", data, buf.Len(), buf.Bytes())
     if _, err := peer.conn.Write(buf.Bytes()); err != nil {
         return fmt.Errorf("failed to write packet body! %v", err)
     }
@@ -99,50 +117,52 @@ func (peer *CNPeer) Kill() {
         return
     }
 
-    log.Printf("Killing peer %p", peer)
     peer.conn.Close()
 }
 
 // meant to be invoked as a goroutine
-func (peer *CNPeer) Handler(eRecv chan<- *Event) error {
+func (peer *CNPeer) Handler(eRecv chan<- *PacketEvent) error {
     defer func() {
-        eRecv <- &Event{Type: EVENT_CLIENT_DISCONNECT, Peer: peer}
         close(eRecv)
         peer.Kill()
     }()
 
     peer.alive.Store(true)
-    eRecv <- &Event{Type: EVENT_CLIENT_CONNECT, Peer: peer}
     for {
-        // read packet size, the goroutine spends most of it's time parked here
-        var sz uint32
-        if err := binary.Read(peer.conn, binary.LittleEndian, &sz); err != nil {
-            return err
-        }
+        select {
+        case <-peer.ctx.Done():
+            return nil
+        default:
+            // read packet size, the goroutine spends most of it's time parked here
+            var sz uint32
+            if err := binary.Read(peer.conn, binary.LittleEndian, &sz); err != nil {
+                return err
+            }
 
-        // client should never send a packet size outside of this range
-        if sz > CN_PACKET_BUFFER_SIZE || sz < 4 {
-            return fmt.Errorf("invalid packet size: %d", sz)
-        }
+            // client should never send a packet size outside of this range
+            if sz > CN_PACKET_BUFFER_SIZE || sz < 4 {
+                return fmt.Errorf("invalid packet size: %d", sz)
+            }
 
-        // grab buffer && read packet body
-        buf := GetBuffer()
-        if _, err := buf.ReadFrom(io.LimitReader(peer.conn, int64(sz))); err != nil {
-            return fmt.Errorf("failed to read packet body: %v", err)
-        }
+            // grab buffer && read packet body
+            buf := GetBuffer()
+            if _, err := buf.ReadFrom(io.LimitReader(peer.conn, int64(sz))); err != nil {
+                return fmt.Errorf("failed to read packet body: %v", err)
+            }
 
-        // decrypt
-        DecryptData(buf.Bytes(), peer.E_key)
-        pkt := NewPacket(buf)
+            // decrypt
+            DecryptData(buf.Bytes(), peer.E_key)
+            pkt := NewPacket(buf)
 
-        // create packet && read pktID
-        var pktID uint32
-        if err := pkt.Decode(&pktID); err != nil {
-            return fmt.Errorf("failed to read packet type! %v", err)
-        }
+            // create packet && read pktID
+            var pktID uint32
+            if err := pkt.Decode(&pktID); err != nil {
+                return fmt.Errorf("failed to read packet type! %v", err)
+            }
 
-        // dispatch packet
-        // log.Printf("Got packet ID: %x, with a sizeof: %d\n", pktID, sz)
-        eRecv <- &Event{Type: EVENT_CLIENT_PACKET, Peer: peer, Pkt: buf, PktID: pktID}
+            // dispatch packet
+            // log.Printf("Got packet ID: %x, with a sizeof: %d\n", pktID, sz)
+            eRecv <- &PacketEvent{Pkt: buf, PktID: pktID}
+        }
     }
 }
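
A hedged sketch of driving the new CNPeer API directly, mirroring what the service event loop and the unit test below do (ctx is a cancelable context, conn is any net.Conn, and handlePkt is a hypothetical callback, not something from this commit):

    eRecv := make(chan *protocol.PacketEvent)
    peer := protocol.NewCNPeer(ctx, conn)

    go func() {
        // consume packet events until Handler closes eRecv on disconnect
        for evnt := range eRecv {
            handlePkt(peer, evnt.PktID, protocol.NewPacket(evnt.Pkt))
            protocol.PutBuffer(evnt.Pkt) // buffers are pooled and must be returned
        }
    }()

    // blocks until the peer dies or ctx is canceled
    if err := peer.Handler(eRecv); err != nil {
        log.Printf("peer %p dropped: %v", peer, err)
    }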


@@ -1,16 +0,0 @@
-package protocol
-
-import "bytes"
-
-const (
-    EVENT_CLIENT_DISCONNECT = iota
-    EVENT_CLIENT_CONNECT
-    EVENT_CLIENT_PACKET
-)
-
-type Event struct {
-    Type  int
-    Peer  *CNPeer
-    Pkt   *bytes.Buffer
-    PktID uint32
-}


@@ -1,6 +1,7 @@
 package service
 
 import (
+    "context"
     "errors"
     "fmt"
     "log"
@@ -13,9 +14,9 @@ import (
     "github.com/CPunch/gopenfusion/internal/protocol"
 )
 
-type PacketHandler func(peer *protocol.CNPeer, uData interface{}, pkt protocol.Packet) error
+type PacketHandler func(peer *protocol.CNPeer, pkt protocol.Packet) error
 
-func StubbedPacket(_ *protocol.CNPeer, _ interface{}, _ protocol.Packet) error {
+func StubbedPacket(_ *protocol.CNPeer, _ protocol.Packet) error {
     return nil
 }
@@ -23,22 +24,22 @@ type Service struct {
     listener net.Listener
     port     int
     Name     string
-    stop     chan struct{} // tell active handleEvents() to stop
-    stopped  chan struct{}
+    ctx      context.Context
     started  chan struct{}
+    stopped  chan struct{}
 
     packetHandlers map[uint32]PacketHandler
-    peers          map[*protocol.CNPeer]interface{}
+    peers          map[chan *protocol.PacketEvent]*protocol.CNPeer
     stateLock      sync.Mutex
 
     // OnDisconnect is called when a peer disconnects from the service.
     // uData is the stored value of the key/value pair in the peer map.
     // It may not be set while the service is running. (eg. srvc.Start() has been called)
-    OnDisconnect func(peer *protocol.CNPeer, uData interface{})
+    OnDisconnect func(peer *protocol.CNPeer)
 
     // OnConnect is called when a peer connects to the service.
     // return value is used as the value in the peer map.
     // It may not be set while the service is running. (eg. srvc.Start() has been called)
-    OnConnect func(peer *protocol.CNPeer) (uData interface{})
+    OnConnect func(peer *protocol.CNPeer)
 }
 
 func RandomPort() (int, error) {
@@ -55,44 +56,52 @@ func RandomPort() (int, error) {
     return strconv.Atoi(port)
 }
 
-func NewService(name string, port int) *Service {
+func NewService(ctx context.Context, name string, port int) *Service {
     srvc := &Service{
         port: port,
         Name: name,
     }
 
-    srvc.Reset()
+    srvc.Reset(ctx)
     return srvc
 }
 
-func (service *Service) Reset() {
-    service.packetHandlers = make(map[uint32]PacketHandler)
-    service.peers = make(map[*protocol.CNPeer]interface{})
-    service.started = make(chan struct{})
+func (srvc *Service) Reset(ctx context.Context) {
+    srvc.ctx = ctx
+    srvc.packetHandlers = make(map[uint32]PacketHandler)
+    srvc.peers = make(map[chan *protocol.PacketEvent]*protocol.CNPeer)
+    srvc.started = make(chan struct{})
+    srvc.stopped = make(chan struct{})
 }
 
 // may not be called while the service is running (eg. srvc.Start() has been called)
-func (service *Service) AddPacketHandler(pktID uint32, handler PacketHandler) {
-    service.packetHandlers[pktID] = handler
+func (srvc *Service) AddPacketHandler(pktID uint32, handler PacketHandler) {
+    srvc.packetHandlers[pktID] = handler
 }
 
-func (service *Service) Start() error {
-    service.stop = make(chan struct{})
-    service.stopped = make(chan struct{})
-    peerConnections := make(chan chan *protocol.Event)
-    go service.handleEvents(peerConnections)
+type newPeerConnection struct {
+    peer    *protocol.CNPeer
+    channel chan *protocol.PacketEvent
+}
+
+func (srvc *Service) Start() error {
+    peerConnections := make(chan newPeerConnection)
+    defer close(peerConnections)
+    go srvc.handleEvents(peerConnections)
 
     // open listener socket
     var err error
-    service.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", service.port))
+    srvc.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", srvc.port))
     if err != nil {
         return err
     }
+    defer srvc.listener.Close()
 
-    close(service.started) // signal that the service has started
-    log.Printf("%s service hosted on %s:%d\n", service.Name, config.GetAnnounceIP(), service.port)
+    log.Printf("%s service hosted on %s:%d\n", srvc.Name, config.GetAnnounceIP(), srvc.port)
+    close(srvc.started) // signal that the service has started
 
     for {
-        conn, err := service.listener.Accept()
+        conn, err := srvc.listener.Accept()
         if err != nil {
             fmt.Println(err)
             // we expect this to happen when the service is stopped
@@ -103,143 +112,148 @@ func (service *Service) Start() error {
         }
 
         // create a new peer and pass it to the event loop
-        eRecv := make(chan *protocol.Event)
-        peer := protocol.NewCNPeer(conn)
-        log.Printf("New peer %p connected to %s\n", peer, service.Name)
-        peerConnections <- eRecv
+        peer := protocol.NewCNPeer(srvc.ctx, conn)
+        eRecv := make(chan *protocol.PacketEvent)
+        peerConnections <- newPeerConnection{channel: eRecv, peer: peer}
         go peer.Handler(eRecv)
     }
 }
 
+func (srvc *Service) getPeer(channel chan *protocol.PacketEvent) *protocol.CNPeer {
+    return srvc.peers[channel]
+}
+
+func (srvc *Service) setPeer(channel chan *protocol.PacketEvent, peer *protocol.CNPeer) {
+    srvc.peers[channel] = peer
+}
+
+func (srvc *Service) removePeer(channel chan *protocol.PacketEvent) {
+    delete(srvc.peers, channel)
+}
+
 // returns a channel that is closed when the service has started.
-// this is useful if you need to do something after the service has started.
-func (service *Service) Started() <-chan struct{} {
-    return service.started
+// this is useful if you need to wait until after the service has started.
+func (srvc *Service) Started() <-chan struct{} {
+    return srvc.started
 }
 
 // returns a channel that is closed when the service has stopped.
-// this is useful if you need to wait until the service has completely stopped.
-func (service *Service) Stopped() <-chan struct{} {
-    return service.stopped
-}
-
-// stops the service and disconnects all peers. OnDisconnect will be called
-// for each peer.
-func (service *Service) Stop() {
-    close(service.stop)
-    service.listener.Close()
-}
-
-// returns the stored uData for the peer.
-// if the peer does not exist, nil is returned.
-// NOTE: the peer map is not locked while accessing, if you're calling this
-// outside of the service's event loop, you'll need to lock the peer map yourself.
-func (service *Service) GetPeerData(peer *protocol.CNPeer) interface{} {
-    return service.peers[peer]
-}
-
-// sets the stored uData for the peer.
-// NOTE: the peer map is not locked while accessing, if you're calling this
-// outside of the service's event loop, you'll need to lock the peer map yourself.
-func (service *Service) SetPeerData(peer *protocol.CNPeer, uData interface{}) {
-    service.peers[peer] = uData
+// this is useful if you need wait until after the service has stopped.
+func (srvc *Service) Stopped() <-chan struct{} {
+    return srvc.stopped
 }
 
 // calls f for each peer in the service passing the peer and the stored uData.
 // if f returns false, the iteration is stopped.
 // NOTE: the peer map is not locked while iterating, if you're calling this
 // outside of the service's event loop, you'll need to lock the peer map yourself.
-func (service *Service) RangePeers(f func(peer *protocol.CNPeer, uData interface{}) bool) {
-    for peer, uData := range service.peers {
-        if !f(peer, uData) {
+func (srvc *Service) RangePeers(f func(peer *protocol.CNPeer) bool) {
+    for _, peer := range srvc.peers {
+        if !f(peer) {
             break
         }
     }
 }
 
 // locks the peer map.
-func (service *Service) Lock() {
-    service.stateLock.Lock()
+func (srvc *Service) Lock() {
+    srvc.stateLock.Lock()
 }
 
 // unlocks the peer map.
-func (service *Service) Unlock() {
-    service.stateLock.Unlock()
+func (srvc *Service) Unlock() {
+    srvc.stateLock.Unlock()
 }
 
+func (srvc *Service) stop() {
+    // OnDisconnect handler might need to do something important
+    srvc.RangePeers(func(peer *protocol.CNPeer) bool {
+        peer.Kill()
+        if srvc.OnDisconnect != nil {
+            srvc.OnDisconnect(peer)
+        }
+        return true
+    })
+
+    log.Printf("%s service stopped\n", srvc.Name)
+    close(srvc.stopped)
+}
+
 // handleEvents is the main event loop for the service.
 // it handles all events from the peers and calls the appropriate handlers.
-func (service *Service) handleEvents(eRecv <-chan chan *protocol.Event) {
+func (srvc *Service) handleEvents(peerPipe <-chan newPeerConnection) {
+    defer srvc.stop()
+
     poll := make([]reflect.SelectCase, 0, 4)
 
     // add the stop channel and the peer connection channel to our poll queue
     poll = append(poll, reflect.SelectCase{
         Dir:  reflect.SelectRecv,
-        Chan: reflect.ValueOf(service.stop),
+        Chan: reflect.ValueOf(srvc.ctx.Done()),
     })
     poll = append(poll, reflect.SelectCase{
         Dir:  reflect.SelectRecv,
-        Chan: reflect.ValueOf(eRecv),
+        Chan: reflect.ValueOf(peerPipe),
     })
 
+    addPoll := func(channel chan *protocol.PacketEvent) {
+        poll = append(poll, reflect.SelectCase{
+            Dir:  reflect.SelectRecv,
+            Chan: reflect.ValueOf(channel),
+        })
+    }
+
+    removePoll := func(index int) {
+        poll = append(poll[:index], poll[index+1:]...)
+    }
+
     for {
-        chosen, value, _ := reflect.Select(poll)
-        if chosen == 0 {
-            // stop
-
-            // OnDisconnect handler might need to do something important
-            service.Lock()
-            service.RangePeers(func(peer *protocol.CNPeer, uData interface{}) bool {
-                peer.Kill()
-                service.disconnect(peer)
-                return true
-            })
-            service.Unlock()
-
-            // signal we have stopped
-            close(service.stopped)
+        chosen, value, recvOK := reflect.Select(poll)
+        switch chosen {
+        case 0: // cancel signal received, stop the service
             return
-        } else if chosen == 1 {
-            // new peer, add it to our poll queue
-            poll = append(poll, reflect.SelectCase{
-                Dir:  reflect.SelectRecv,
-                Chan: reflect.ValueOf(value.Interface()),
-            })
-        } else {
-            // peer event
-            event, ok := value.Interface().(*protocol.Event)
-            if !ok {
-                panic("invalid event type")
+        case 1: // new peer, add it to our poll queue
+            if !recvOK {
+                return
             }
 
-            service.Lock()
-            switch event.Type {
-            case protocol.EVENT_CLIENT_DISCONNECT:
-                // strip the peer from our poll queue
-                poll = append(poll[:chosen], poll[chosen+1:]...)
-                service.disconnect(value.Interface().(*protocol.Event).Peer)
-            case protocol.EVENT_CLIENT_CONNECT:
-                service.connect(event.Peer)
-            case protocol.EVENT_CLIENT_PACKET:
-                if err := service.handlePacket(event.Peer, event.PktID, protocol.NewPacket(event.Pkt)); err != nil {
-                    log.Printf("Error handling packet: %v", err)
-                    event.Peer.Kill()
-                }
-                // the packet buffer is given to us by the event, so we'll need to make sure to return it to the pool
-                protocol.PutBuffer(event.Pkt)
+            evnt := value.Interface().(newPeerConnection)
+            addPoll(evnt.channel)
+            srvc.connect(evnt.channel, evnt.peer)
+        default: // peer event
+            channel := poll[chosen].Chan.Interface().(chan *protocol.PacketEvent)
+            peer := srvc.getPeer(channel)
+            if peer == nil {
+                log.Printf("Unknown peer event: %v", value)
+                removePoll(chosen)
+                continue
             }
-            service.Unlock()
+
+            evnt, ok := value.Interface().(*protocol.PacketEvent)
+            if !recvOK || !ok || evnt == nil {
+                // peer disconnected, remove it from our poll queue
+                removePoll(chosen)
+                srvc.disconnect(channel, peer)
+                continue
+            }
+
+            srvc.Lock()
+            if err := srvc.handlePacket(peer, evnt.PktID, protocol.NewPacket(evnt.Pkt)); err != nil {
+                log.Printf("Error handling packet: %v", err)
+                peer.Kill()
+            }
+            srvc.Unlock()
+
+            // the packet buffer is given to us by the event, so we'll need to make sure to return it to the pool
+            protocol.PutBuffer(evnt.Pkt)
         }
     }
 }
 
-func (service *Service) handlePacket(peer *protocol.CNPeer, typeID uint32, pkt protocol.Packet) error {
-    uData := service.peers[peer]
-    if hndlr, ok := service.packetHandlers[typeID]; ok {
+func (srvc *Service) handlePacket(peer *protocol.CNPeer, typeID uint32, pkt protocol.Packet) error {
+    if hndlr, ok := srvc.packetHandlers[typeID]; ok {
         // fmt.Printf("Handling packet %x\n", typeID)
-        if err := hndlr(peer, uData, pkt); err != nil {
+        if err := hndlr(peer, pkt); err != nil {
             return err
         }
     } else {
@@ -249,24 +263,20 @@ func (service *Service) handlePacket(peer *protocol.CNPeer, typeID uint32, pkt p
     return nil
 }
 
-func (service *Service) disconnect(peer *protocol.CNPeer) {
-    if service.OnDisconnect != nil {
-        uData := service.peers[peer]
-        service.OnDisconnect(peer, uData)
+func (srvc *Service) disconnect(channel chan *protocol.PacketEvent, peer *protocol.CNPeer) {
+    log.Printf("Peer %p disconnected from %s\n", peer, srvc.Name)
+    if srvc.OnDisconnect != nil {
+        srvc.OnDisconnect(peer)
     }
 
-    log.Printf("Peer %p disconnected from %s\n", peer, service.Name)
-    delete(service.peers, peer)
+    srvc.removePeer(channel)
 }
 
-func (service *Service) connect(peer *protocol.CNPeer) {
-    // default uData to nil, but if the service has an OnConnect
-    // handler, use the result from that
-    uData := interface{}(nil)
-    if service.OnConnect != nil {
-        uData = service.OnConnect(peer)
+func (srvc *Service) connect(channel chan *protocol.PacketEvent, peer *protocol.CNPeer) {
+    log.Printf("New peer %p connected to %s\n", peer, srvc.Name)
+    if srvc.OnConnect != nil {
+        srvc.OnConnect(peer)
     }
 
-    log.Printf("New peer %p connected to %s\n", peer, service.Name)
-    service.SetPeerData(peer, uData)
+    srvc.setPeer(channel, peer)
 }
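
Since OnConnect no longer returns uData and OnDisconnect no longer receives it, per-peer state is attached to the peer itself; a rough sketch of the new wiring (sessionState is a made-up type for illustration only):

    type sessionState struct {
        loggedIn bool
    }

    func wireCallbacks(srvc *service.Service) {
        srvc.OnConnect = func(peer *protocol.CNPeer) {
            // previously: return the uData value from OnConnect
            peer.SetUserData(&sessionState{})
        }

        srvc.OnDisconnect = func(peer *protocol.CNPeer) {
            if state, ok := peer.UserData().(*sessionState); ok && state.loggedIn {
                // release any per-session resources here
            }
        }
    }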


@@ -1,7 +1,9 @@
 package service_test
 
 import (
+    "context"
     "fmt"
+    "log"
     "net"
     "os"
     "sync"
@@ -13,12 +15,11 @@ import (
 )
 
 var (
-    srvc     *service.Service
     srvcPort int
 )
 
 const (
-    timeout       = 5
+    timeout       = 2
     maxDummyPeers = 5
 )
@@ -44,15 +45,18 @@ func TestMain(m *testing.M) {
         panic(err)
     }
 
-    srvc = service.NewService("TEST", srvcPort)
     os.Exit(m.Run())
 }
 
 func TestService(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    srvc := service.NewService(ctx, "TEST", srvcPort)
+
     // waitgroup to wait for test packet handler to be called
     wg := sync.WaitGroup{}
-    srvc.AddPacketHandler(0x1234, func(peer *protocol.CNPeer, uData interface{}, pkt protocol.Packet) error {
+    srvc.AddPacketHandler(0x1234, func(peer *protocol.CNPeer, pkt protocol.Packet) error {
+        log.Printf("Received packet %#v", pkt)
         wg.Done()
         return nil
     })
@@ -65,7 +69,7 @@ func TestService(t *testing.T) {
     // wait for service to start
     <-srvc.Started()
 
-    wg.Add(maxDummyPeers)
+    wg.Add(maxDummyPeers * 3) // 2 wg.Done() calls per dummy peer
     for i := 0; i < maxDummyPeers; i++ {
         go func() {
             // make dummy client
@@ -74,18 +78,28 @@ func TestService(t *testing.T) {
                 t.Error(err)
             }
 
-            peer := protocol.NewCNPeer(conn)
-            defer peer.Kill()
+            peer := protocol.NewCNPeer(ctx, conn)
+            go func() {
+                defer peer.Kill()
 
-            // send dummy packet
-            if err := peer.Send(0x1234); err != nil {
-                t.Error(err)
-            }
+                // send dummy packets
+                for i := 0; i < 2; i++ {
+                    if err := peer.Send(0x1234); err != nil {
+                        t.Error(err)
+                    }
+                }
+            }()
+
+            // we wait until Handler gracefully exits (peer was killed)
+            peer.Handler(make(chan *protocol.PacketEvent))
+            wg.Done()
         }()
     }
 
     if !waitWithTimeout(&wg, timeout) {
-        t.Error("timeout waiting for packet handler to be called")
+        t.Error("failed to wait for packet handler to be called")
     }
 
-    srvc.Stop()
+    cancel()
     <-srvc.Stopped()
 }
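
The waitWithTimeout helper referenced above lives outside these hunks and is not shown in this diff; a helper with that shape could look roughly like this (signature assumed from the call site, where timeout is a small number of seconds):

    func waitWithTimeout(wg *sync.WaitGroup, seconds int) bool {
        done := make(chan struct{})
        go func() {
            wg.Wait()
            close(done)
        }()

        select {
        case <-done:
            return true // every expected wg.Done() happened
        case <-time.After(time.Duration(seconds) * time.Second):
            return false // gave up waiting
        }
    }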


@@ -61,7 +61,7 @@ func (server *LoginServer) AcceptLogin(peer *protocol.CNPeer, SzID string, IClie
     return nil
 }
 
-func (server *LoginServer) Login(peer *protocol.CNPeer, _account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) Login(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var loginPkt protocol.SP_CL2LS_REQ_LOGIN
     pkt.Decode(&loginPkt)
@@ -73,9 +73,9 @@ func (server *LoginServer) Login(peer *protocol.CNPeer, _account interface{}, pk
     }
 
     // client is resending a login packet??
-    if _account != nil {
+    if peer.UserData() != nil {
         SendError(LOGIN_ERROR)
-        return fmt.Errorf("out of order P_CL2LS_REQ_LOGIN: %v", _account)
+        return fmt.Errorf("out of order P_CL2LS_REQ_LOGIN: %v", peer.UserData())
     }
 
     // attempt login
@@ -98,7 +98,7 @@ func (server *LoginServer) Login(peer *protocol.CNPeer, _account interface{}, pk
     }
 
     // grab player data
-    server.service.SetPeerData(peer, account)
+    peer.SetUserData(account)
     plrs, err := server.dbHndlr.GetPlayers(account.AccountID)
     if err != nil {
         SendError(LOGIN_DATABASE_ERROR)
@@ -137,7 +137,7 @@ func (server *LoginServer) Login(peer *protocol.CNPeer, _account interface{}, pk
     return server.AcceptLogin(peer, loginPkt.SzID, loginPkt.IClientVerC, 1, charInfo[:len(plrs)])
 }
 
-func (server *LoginServer) CheckCharacterName(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) CheckCharacterName(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var charPkt protocol.SP_CL2LS_REQ_CHECK_CHAR_NAME
     pkt.Decode(&charPkt)
@@ -148,17 +148,18 @@ func (server *LoginServer) CheckCharacterName(peer *protocol.CNPeer, account int
     })
 }
 
-func (server *LoginServer) SaveCharacterName(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) SaveCharacterName(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var charPkt protocol.SP_CL2LS_REQ_SAVE_CHAR_NAME
     pkt.Decode(&charPkt)
 
-    if account == nil {
+    account, ok := peer.UserData().(*db.Account)
+    if !ok || account == nil {
         peer.Send(protocol.P_LS2CL_REP_SAVE_CHAR_NAME_FAIL, protocol.SP_LS2CL_REP_SAVE_CHAR_NAME_FAIL{})
         return fmt.Errorf("out of order P_LS2CL_REP_SAVE_CHAR_NAME_FAIL")
     }
 
     // TODO: sanity check SzFirstName && SzLastName
-    PlayerID, err := server.dbHndlr.NewPlayer(account.(*db.Account).AccountID, charPkt.SzFirstName, charPkt.SzLastName, int(charPkt.ISlotNum))
+    PlayerID, err := server.dbHndlr.NewPlayer(account.AccountID, charPkt.SzFirstName, charPkt.SzLastName, int(charPkt.ISlotNum))
     if err != nil {
         peer.Send(protocol.P_LS2CL_REP_SAVE_CHAR_NAME_FAIL, protocol.SP_LS2CL_REP_SAVE_CHAR_NAME_FAIL{})
         return err
@@ -210,11 +211,12 @@ func SendFail(peer *protocol.CNPeer) error {
     return nil
 }
 
-func (server *LoginServer) CharacterCreate(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) CharacterCreate(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var charPkt protocol.SP_CL2LS_REQ_CHAR_CREATE
     pkt.Decode(&charPkt)
 
-    if account == nil {
+    account, ok := peer.UserData().(*db.Account)
+    if !ok || account == nil {
         return SendFail(peer)
     }
@@ -223,7 +225,7 @@ func (server *LoginServer) CharacterCreate(peer *protocol.CNPeer, account interf
         return SendFail(peer)
     }
 
-    if err := server.dbHndlr.FinishPlayer(&charPkt, account.(*db.Account).AccountID); err != nil {
+    if err := server.dbHndlr.FinishPlayer(&charPkt, account.AccountID); err != nil {
         log.Printf("Error finishing player: %v", err)
         return SendFail(peer)
     }
@@ -242,15 +244,16 @@ func (server *LoginServer) CharacterCreate(peer *protocol.CNPeer, account interf
     })
 }
 
-func (server *LoginServer) CharacterDelete(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) CharacterDelete(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var charPkt protocol.SP_CL2LS_REQ_CHAR_DELETE
     pkt.Decode(&charPkt)
 
-    if account == nil {
+    account, ok := peer.UserData().(*db.Account)
+    if !ok || account == nil {
         return SendFail(peer)
     }
 
-    slot, err := server.dbHndlr.DeletePlayer(int(charPkt.IPC_UID), account.(*db.Account).AccountID)
+    slot, err := server.dbHndlr.DeletePlayer(int(charPkt.IPC_UID), account.AccountID)
     if err != nil {
         return SendFail(peer)
     }
@@ -260,11 +263,12 @@ func (server *LoginServer) CharacterDelete(peer *protocol.CNPeer, account interf
     })
 }
 
-func (server *LoginServer) ShardSelect(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) ShardSelect(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var selection protocol.SP_CL2LS_REQ_CHAR_SELECT
     pkt.Decode(&selection)
 
-    if account == nil {
+    account, ok := peer.UserData().(*db.Account)
+    if !ok || account == nil {
         return SendFail(peer)
     }
@@ -289,7 +293,7 @@ func (server *LoginServer) ShardSelect(peer *protocol.CNPeer, account interface{
         log.Printf("Error getting player: %v", err)
         return SendFail(peer)
     }
-    accountID := account.(*db.Account).AccountID
+    accountID := account.AccountID
 
     if plr.AccountID != accountID {
         log.Printf("HACK: player %d tried to join shard as player %d", accountID, plr.AccountID)
@@ -315,15 +319,16 @@ func (server *LoginServer) ShardSelect(peer *protocol.CNPeer, account interface{
     return peer.Send(protocol.P_LS2CL_REP_SHARD_SELECT_SUCC, resp)
 }
 
-func (server *LoginServer) FinishTutorial(peer *protocol.CNPeer, account interface{}, pkt protocol.Packet) error {
+func (server *LoginServer) FinishTutorial(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var charPkt protocol.SP_CL2LS_REQ_SAVE_CHAR_TUTOR
     pkt.Decode(&charPkt)
 
-    if account == nil {
+    account, ok := peer.UserData().(*db.Account)
+    if !ok || account == nil {
         return SendFail(peer)
     }
 
-    if err := server.dbHndlr.FinishTutorial(int(charPkt.IPC_UID), account.(*db.Account).AccountID); err != nil {
+    if err := server.dbHndlr.FinishTutorial(int(charPkt.IPC_UID), account.AccountID); err != nil {
         return SendFail(peer)
     }
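
Every handler above follows the same replacement pattern for the removed account argument; sketched once with a hypothetical handler name:

    func (server *LoginServer) someHandler(peer *protocol.CNPeer, pkt protocol.Packet) error {
        // the user data is set by Login(); anything else is an out-of-order packet
        account, ok := peer.UserData().(*db.Account)
        if !ok || account == nil {
            return SendFail(peer)
        }

        // ... use account.AccountID directly, no interface{} assertion needed ...
        return nil
    }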


@@ -1,6 +1,8 @@
 package login
 
 import (
+    "context"
+
     "github.com/CPunch/gopenfusion/internal/db"
     "github.com/CPunch/gopenfusion/internal/protocol"
     "github.com/CPunch/gopenfusion/internal/redis"
@@ -13,8 +15,8 @@ type LoginServer struct {
     redisHndlr *redis.RedisHandler
 }
 
-func NewLoginServer(dbHndlr *db.DBHandler, redisHndlr *redis.RedisHandler, port int) (*LoginServer, error) {
-    srvc := service.NewService("LOGIN", port)
+func NewLoginServer(ctx context.Context, dbHndlr *db.DBHandler, redisHndlr *redis.RedisHandler, port int) (*LoginServer, error) {
+    srvc := service.NewService(ctx, "LOGIN", port)
 
     server := &LoginServer{
         service: srvc,
@@ -37,17 +39,9 @@ func NewLoginServer(dbHndlr *db.DBHandler, redisHndlr *redis.RedisHandler, port
     srvc.AddPacketHandler(protocol.P_CL2LS_REQ_CHANGE_CHAR_NAME, service.StubbedPacket)
     srvc.AddPacketHandler(protocol.P_CL2LS_REQ_SERVER_SELECT, service.StubbedPacket)
 
-    srvc.OnConnect = func(peer *protocol.CNPeer) interface{} {
-        return nil
-    }
-
     return server, nil
 }
 
 func (server *LoginServer) Start() error {
     return server.service.Start()
 }
-
-func (server *LoginServer) Stop() {
-    server.service.Stop()
-}


@@ -7,14 +7,14 @@ import (
     "github.com/CPunch/gopenfusion/internal/protocol"
 )
 
-func (server *ShardServer) freeChat(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) freeChat(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var chat protocol.SP_CL2FE_REQ_SEND_FREECHAT_MESSAGE
     pkt.Decode(&chat)
 
-    if _plr == nil {
-        return fmt.Errorf("freeChat: _plr is nil")
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
+        return fmt.Errorf("freeChat: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // spread message
     return server.sendAllPacket(plr, protocol.P_FE2CL_REP_SEND_FREECHAT_MESSAGE_SUCC, protocol.SP_FE2CL_REP_SEND_FREECHAT_MESSAGE_SUCC{
@@ -24,14 +24,14 @@ func (server *ShardServer) freeChat(peer *protocol.CNPeer, _plr interface{}, pkt
     })
 }
 
-func (server *ShardServer) menuChat(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) menuChat(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var chat protocol.SP_CL2FE_REQ_SEND_MENUCHAT_MESSAGE
     pkt.Decode(&chat)
 
-    if _plr == nil {
-        return fmt.Errorf("menuChat: _plr is nil")
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
+        return fmt.Errorf("menuChat: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // spread message
     return server.sendAllPacket(plr, protocol.P_FE2CL_REP_SEND_MENUCHAT_MESSAGE_SUCC, protocol.SP_FE2CL_REP_SEND_MENUCHAT_MESSAGE_SUCC{
@@ -41,14 +41,14 @@ func (server *ShardServer) menuChat(peer *protocol.CNPeer, _plr interface{}, pkt
     })
 }
 
-func (server *ShardServer) emoteChat(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) emoteChat(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var chat protocol.SP_CL2FE_REQ_PC_AVATAR_EMOTES_CHAT
     pkt.Decode(&chat)
 
-    if _plr == nil {
-        return fmt.Errorf("emoteChat: _plr is nil")
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
+        return fmt.Errorf("emoteChat: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // spread message
     return server.sendAllPacket(plr, protocol.P_FE2CL_REP_PC_AVATAR_EMOTES_CHAT, protocol.SP_FE2CL_REP_PC_AVATAR_EMOTES_CHAT{


@@ -20,16 +20,17 @@ func (server *ShardServer) attachPlayer(peer *protocol.CNPeer, meta redis.LoginM
     // server.Start() goroutine. the only functions allowed to access
     // it are the packet handlers as no other goroutines will be
     // concurrently accessing it.
-    server.service.SetPeerData(peer, plr)
+    peer.SetUserData(plr)
 
     return plr, nil
 }
 
-func (server *ShardServer) RequestEnter(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) RequestEnter(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var enter protocol.SP_CL2FE_REQ_PC_ENTER
     pkt.Decode(&enter)
 
     // resending a shard enter packet?
-    if _plr != nil {
+    _plr, ok := peer.UserData().(*entity.Player)
+    if ok && _plr != nil {
         return fmt.Errorf("resent enter packet")
     }
@@ -64,15 +65,15 @@ func (server *ShardServer) RequestEnter(peer *protocol.CNPeer, _plr interface{},
     return nil
 }
 
-func (server *ShardServer) LoadingComplete(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) LoadingComplete(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var loadComplete protocol.SP_CL2FE_REQ_PC_LOADING_COMPLETE
     pkt.Decode(&loadComplete)
 
     // was the peer attached to a player?
-    if _plr == nil {
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
         return fmt.Errorf("loadingComplete: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     err := peer.Send(protocol.P_FE2CL_REP_PC_LOADING_COMPLETE_SUCC, protocol.SP_FE2CL_REP_PC_LOADING_COMPLETE_SUCC{IPC_ID: int32(plr.PlayerID)})
     if err != nil {


@@ -15,14 +15,14 @@ func (server *ShardServer) updatePlayerPosition(plr *entity.Player, X, Y, Z, Ang
     server.updateEntityChunk(plr, plr.GetChunkPos(), entity.MakeChunkPosition(X, Y))
 }
 
-func (server *ShardServer) playerMove(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) playerMove(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var move protocol.SP_CL2FE_REQ_PC_MOVE
     pkt.Decode(&move)
 
-    if _plr == nil {
-        return fmt.Errorf("playerMove: _plr is nil")
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
+        return fmt.Errorf("playerMove: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // update chunking
     server.updatePlayerPosition(plr, int(move.IX), int(move.IY), int(move.IZ), int(move.IAngle))
@@ -43,14 +43,14 @@ func (server *ShardServer) playerMove(peer *protocol.CNPeer, _plr interface{}, p
     })
 }
 
-func (server *ShardServer) playerStop(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) playerStop(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var stop protocol.SP_CL2FE_REQ_PC_STOP
     pkt.Decode(&stop)
 
-    if _plr == nil {
-        return fmt.Errorf("playerStop: _plr is nil")
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
+        return fmt.Errorf("playerStop: plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // update chunking
     server.updatePlayerPosition(plr, int(stop.IX), int(stop.IY), int(stop.IZ), plr.Angle)
@@ -65,14 +65,14 @@ func (server *ShardServer) playerStop(peer *protocol.CNPeer, _plr interface{}, p
     })
 }
 
-func (server *ShardServer) playerJump(peer *protocol.CNPeer, _plr interface{}, pkt protocol.Packet) error {
+func (server *ShardServer) playerJump(peer *protocol.CNPeer, pkt protocol.Packet) error {
     var jump protocol.SP_CL2FE_REQ_PC_JUMP
     pkt.Decode(&jump)
 
-    if _plr == nil {
+    plr, ok := peer.UserData().(*entity.Player)
+    if !ok || plr == nil {
         return fmt.Errorf("playerJump: _plr is nil")
     }
-    plr := _plr.(*entity.Player)
 
     // update chunking
     server.updatePlayerPosition(plr, int(jump.IX), int(jump.IY), int(jump.IZ), plr.Angle)


@@ -1,6 +1,8 @@
 package shard
 
 import (
+    "context"
+
     "github.com/CPunch/gopenfusion/config"
     "github.com/CPunch/gopenfusion/internal/db"
     "github.com/CPunch/gopenfusion/internal/entity"
@@ -18,8 +20,8 @@ type ShardServer struct {
     chunks map[entity.ChunkPosition]*entity.Chunk
 }
 
-func NewShardServer(dbHndlr *db.DBHandler, redisHndlr *redis.RedisHandler, port int) (*ShardServer, error) {
-    srvc := service.NewService("SHARD", port)
+func NewShardServer(ctx context.Context, dbHndlr *db.DBHandler, redisHndlr *redis.RedisHandler, port int) (*ShardServer, error) {
+    srvc := service.NewService(ctx, "SHARD", port)
 
     server := &ShardServer{
         service: srvc,
@@ -53,13 +55,14 @@ func (server *ShardServer) Start() {
     server.service.Start()
 }
 
-func (server *ShardServer) onDisconnect(peer *protocol.CNPeer, _plr interface{}) {
+func (server *ShardServer) onDisconnect(peer *protocol.CNPeer) {
     // remove from chunks
-    if _plr != nil {
-        server.removeEntity(_plr.(*entity.Player))
+    plr, ok := peer.UserData().(*entity.Player)
+    if ok && plr != nil {
+        server.removeEntity(plr)
     }
 }
 
-func (server *ShardServer) onConnect(peer *protocol.CNPeer) interface{} {
-    return nil
+func (server *ShardServer) onConnect(peer *protocol.CNPeer) {
 }
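
One way to exercise the new ctx parameter end to end is to derive the context from OS signals so that Ctrl-C cancels it and the services shut down gracefully; a hedged sketch, not part of this commit (dbHndlr, redisHndlr and port are assumed to be in scope, as in the shard subcommand above):

    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    shardServer, err := shard.NewShardServer(ctx, dbHndlr, redisHndlr, port)
    if err != nil {
        log.Panicf("failed to create shard server: %v", err)
    }

    go shardServer.Start()
    <-ctx.Done() // blocks until an interrupt arrives; cancellation stops the service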