Mirror of https://github.com/CPunch/gopenfusion.git
server/shard: added UpdatePlayer(), which should be used whenever player struct state needs to be updated to prevent race conditions
commit 70e42b5d79 (parent bb29a988b3)
@@ -2,9 +2,9 @@
 A toy implementation of the [Fusionfall Packet Protocol](https://openpunk.com/pages/fusionfall-openfusion/) written in Go.
 
-## Login Sever
+## Landwalker demo
 
-An example login server implementation exists in `server/`. This implementation should be compatible with existing OpenFusion databases, however this only exists as an example and doesn't direct clients to a shard server (they're softlocked after the tutorial, or during character selection).
+An implementation of a landwalker server is located in `server/`. This includes a functional login server and a dummy shard (supporting the minimum amount of packets necessary). The DB implementation in `core/db/` matches the OpenFusion 1.4 SQLite tables, which the login server located in `server/` uses. There's no support for NPCs nor other players, and is liable to softlock the client.
 
 ## Generating structures
@@ -120,7 +120,7 @@ func (peer *CNPeer) Handler() {
 		}
 
 		// grab buffer && read packet body
-		if err := func() error { // we wrap this in a closure so we can easily defer the buffer return to pool
+		if err := func() error {
 			buf := pool.Get()
 			defer pool.Put(buf)
 			if _, err := buf.ReadFrom(io.LimitReader(peer.conn, int64(sz))); err != nil {
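For readers unfamiliar with the pattern the removed comment described: the per-packet read is wrapped in an immediately-invoked closure so that `defer pool.Put(buf)` fires as soon as that one packet has been handled, instead of piling up deferred calls until `Handler()` returns. A minimal sketch of the idea, assuming a `sync.Pool` of `*bytes.Buffer`; the names `pool`, `readPacket`, and `handleBody` are illustrative, not gopenfusion's actual API:

package peer // hypothetical package, for the sketch only

import (
	"bytes"
	"io"
	"net"
	"sync"
)

// a pool of reusable packet buffers (assumed for this sketch)
var pool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

// readPacket reads one sz-byte packet body into a pooled buffer and hands it to
// handleBody. The inner closure exists so the deferred pool.Put runs per packet,
// not when the caller's connection loop finally exits.
func readPacket(conn net.Conn, sz int, handleBody func(*bytes.Buffer) error) error {
	return func() error {
		buf := pool.Get().(*bytes.Buffer)
		buf.Reset()
		defer pool.Put(buf) // buffer goes back to the pool as soon as this packet is done

		if _, err := buf.ReadFrom(io.LimitReader(conn, int64(sz))); err != nil {
			return err
		}
		return handleBody(buf)
	}()
}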
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/CPunch/gopenfusion/core"
 	"github.com/CPunch/gopenfusion/core/protocol"
 )
 
@@ -18,21 +19,30 @@ func (server *ShardServer) RequestEnter(peer *protocol.CNPeer, pkt protocol.Pack
 		return err
 	}
 
-	plr, err := server.dbHndlr.GetPlayer(int(loginData.PlayerID))
-	if err != nil {
-		peer.Send(protocol.P_FE2CL_REP_PC_ENTER_FAIL, protocol.SP_FE2CL_REP_PC_ENTER_FAIL{})
-		return err
-	}
-
-	// attach player
-	server.JoinPlayer(peer, plr)
-
-	resp := &protocol.SP_FE2CL_REP_PC_ENTER_SUCC{
-		IID:           int32(plr.PlayerID),
-		PCLoadData2CL: plr.ToPCLoadData2CL(),
-		UiSvrTime:     uint64(time.Now().Unix()),
-	}
+	// attach player
+	var resp *protocol.SP_FE2CL_REP_PC_ENTER_SUCC
+	if err := server.UpdatePlayer(peer, func(old *core.Player) (*core.Player, error) {
+		if old != nil { // resending a shard enter packet?
+			return nil, fmt.Errorf("resent enter packet!")
+		}
+
+		plr, err := server.dbHndlr.GetPlayer(int(loginData.PlayerID))
+		if err != nil {
+			return nil, err
+		}
+
+		resp = &protocol.SP_FE2CL_REP_PC_ENTER_SUCC{
+			IID:           int32(plr.PlayerID),
+			PCLoadData2CL: plr.ToPCLoadData2CL(),
+			UiSvrTime:     uint64(time.Now().Unix()),
+		}
+
+		return plr, nil
+	}); err != nil {
+		peer.Send(protocol.P_FE2CL_REP_PC_ENTER_FAIL, protocol.SP_FE2CL_REP_PC_ENTER_FAIL{})
+		return err
+	}
 
 	// setup key
 	peer.E_key = protocol.CreateNewKey(resp.UiSvrTime, uint64(resp.IID+1), uint64(resp.PCLoadData2CL.IFusionMatter+1))
 	peer.FE_key = loginData.FEKey
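The point of routing the attach through UpdatePlayer is that the "already attached?" check, the DB load, and the store now form one critical section. The old code attached unconditionally, so a resent enter packet (or two enter packets handled concurrently) could silently re-attach a fresh player; the new code detects the duplicate and rejects it, and because the check and the store share peersLock, two packets can't both pass it. A rough sketch of the difference, using stand-in types rather than the real core.Player and ShardServer:

package shard // sketch only; none of these names are gopenfusion's real API

import "sync"

type Player struct{ PlayerID int } // stand-in for core.Player

type table struct {
	peersLock sync.Mutex
	peers     sync.Map // key: peer, value: *Player
}

// racyAttach mirrors the old JoinPlayer shape: the Load and the Store are each atomic,
// but nothing stops another goroutine from attaching a player between them.
func (t *table) racyAttach(peer any, load func() (*Player, error)) error {
	if _, ok := t.peers.Load(peer); ok {
		return nil
	}
	plr, err := load() // a second enter packet can slip in here
	if err != nil {
		return err
	}
	t.peers.Store(peer, plr)
	return nil
}

// lockedAttach mirrors the UpdatePlayer shape: check, DB load, and store all run under
// one mutex, so a duplicate enter packet is detected reliably.
func (t *table) lockedAttach(peer any, load func() (*Player, error)) error {
	t.peersLock.Lock()
	defer t.peersLock.Unlock()

	if _, ok := t.peers.Load(peer); ok {
		return nil // the real handler rejects this as a resent enter packet
	}
	plr, err := load()
	if err != nil {
		return err
	}
	t.peers.Store(peer, plr)
	return nil
}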
@@ -45,7 +55,7 @@ func (server *ShardServer) LoadingComplete(peer *protocol.CNPeer, pkt protocol.P
 	var loadComplete protocol.SP_CL2FE_REQ_PC_LOADING_COMPLETE
 	pkt.Decode(&loadComplete)
 
-	plr := server.GetPlayer(peer)
+	plr := server.LoadPlayer(peer)
 	if plr == nil {
 		return fmt.Errorf("peer has no player attached!")
 	}
@@ -23,8 +23,9 @@ type ShardServer struct {
 	port               int
 	dbHndlr            *db.DBHandler
 	packetHandlers     map[uint32]PacketHandler
-	peers              sync.Map // [*protocol.CNPeer]*core.Player
 	loginMetadataQueue sync.Map // [int64]*LoginMetadata w/ int64 = serialKey
+	peersLock          sync.Mutex
+	peers              sync.Map // [*protocol.CNPeer]core.Player
 }
 
 func NewShardServer(dbHndlr *db.DBHandler, port int) (*ShardServer, error) {
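The struct change captures the locking discipline this commit introduces: individual sync.Map operations are already safe for concurrent use, so plain reads stay lock-free, while the new peersLock serializes any load-modify-store sequence, which sync.Map alone does not make atomic. Roughly, the pair behaves like the following generic wrapper (an illustration of the design choice, not code from the repo; requires Go 1.18+):

package shard // illustrative sketch only

import "sync"

// guardedMap pairs a sync.Map (safe, lock-free individual Loads and Stores) with a
// mutex that serializes read-modify-write updates, since a Load followed by a Store
// is not atomic on its own.
type guardedMap[K comparable, V any] struct {
	mu sync.Mutex
	m  sync.Map
}

// Load is safe to call from any goroutine without taking the mutex.
func (g *guardedMap[K, V]) Load(k K) (V, bool) {
	v, ok := g.m.Load(k)
	if !ok {
		var zero V
		return zero, false
	}
	return v.(V), true
}

// Update mirrors the ShardServer.UpdatePlayer contract: run f on the current value
// while holding the mutex, store the result, and skip the store if f fails.
func (g *guardedMap[K, V]) Update(k K, f func(old V, ok bool) (V, error)) error {
	g.mu.Lock()
	defer g.mu.Unlock()

	old, ok := g.Load(k)
	updated, err := f(old, ok)
	if err != nil {
		return err
	}
	g.m.Store(k, updated)
	return nil
}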
@@ -90,7 +91,7 @@ func (server *ShardServer) Connect(peer *protocol.CNPeer) {
 	server.peers.Store(peer, nil)
 }
 
-func (server *ShardServer) GetPlayer(peer *protocol.CNPeer) *core.Player {
+func (server *ShardServer) LoadPlayer(peer *protocol.CNPeer) *core.Player {
 	val, ok := server.peers.Load(peer)
 	if !ok {
 		return nil
@@ -104,7 +105,25 @@ func (server *ShardServer) GetPlayer(peer *protocol.CNPeer) *core.Player {
 	return plr
 }
 
-func (server *ShardServer) JoinPlayer(peer *protocol.CNPeer, player *core.Player) {
+// UpdatePlayer locks the peers map, and calls the provided callback. The returned new pointer will be stored, however if an error returns it will be passed back.
+// Since it is UNSAFE to write to the returned pointer from LoadPlayer, this wrapper is for the cases that state in the player struct needs to be updated.
+// TODO: maybe LoadPlayer should return a player by value instead?
+// The pointers new and old may be the same if you are just updating struct fields.
+func (server *ShardServer) UpdatePlayer(peer *protocol.CNPeer, f func(old *core.Player) (new *core.Player, err error)) error {
+	server.peersLock.Lock()
+	defer server.peersLock.Unlock()
+
+	// on fail, the player should not be stored
+	new, err := f(server.LoadPlayer(peer))
+	if err != nil {
+		return err
+	}
+
+	server.storePlayer(peer, new)
+	return nil
+}
+
+func (server *ShardServer) storePlayer(peer *protocol.CNPeer, player *core.Player) {
 	server.peers.Store(peer, player)
 }
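Taken together, the new API splits reads from writes: handlers that only inspect the player keep using the renamed LoadPlayer (and must treat the returned pointer as read-only), while anything that mutates player state goes through UpdatePlayer so the load-modify-store runs under peersLock. A hypothetical handler following that contract might look like the sketch below; the position fields and the handler itself are assumptions for illustration, not part of this commit:

// hypothetical example, written against the API added above; assumes core.Player
// carries X/Y/Z position fields and that fmt/core/protocol are already imported
func (server *ShardServer) updatePosition(peer *protocol.CNPeer, x, y, z int) error {
	return server.UpdatePlayer(peer, func(old *core.Player) (*core.Player, error) {
		if old == nil {
			return nil, fmt.Errorf("peer has no player attached!")
		}

		// mutating in place is fine here: peersLock is held for the whole callback,
		// and returning the same pointer simply stores it back (see the comment on
		// UpdatePlayer about new and old possibly being the same pointer)
		old.X, old.Y, old.Z = x, y, z
		return old, nil
	})
}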