shoopea 2020-01-26 14:42:44 +08:00
commit 2f168b12e9
9 changed files with 230 additions and 42 deletions


@@ -6,7 +6,6 @@ ChirpNestBot
 - [ ] Make sure refresh message receives the message later (jobMsgRefresh)
 - [ ] Adjust /time clock for auctions/... (delay between cw and real time ?)
 - [ ] Update old auctions with client
-- [ ] Convert config to json and insert sql structure in it
 - [ ] Add metrics
 - [ ] Eliminate cache map race conditions (all maps with read/write) and use redis ?
 - [ ] Transform import/export to jobs, feed msg to identification channel instead of inserting directly
@@ -15,8 +14,6 @@ ChirpNestBot
 - [ ] Update items parsing/identification with gear details (atk/def/mana)
 - [ ] Auto identificate weight of items
 - [ ] Tribute interception
-- [ ] Withdrawal bot
-- [x] Crontab : engine
 - [ ] Crontab : user interface
 - [ ] Resources hidding
 - [ ] Resources auto destroy
@@ -27,8 +24,12 @@ ChirpNestBot
 - [ ] Impersonate
 - [ ] Link TelegramUserID and UserID (can use historic auction messages)
 - [ ] Issue with Squire in the /g_roles ?
-- [x] Foray interception
 - [ ] Handle Foray timeout to stop spamming in case something goes bad
+- [ ] Insert sql structure in config ?
+- [x] Convert config to json
+- [x] Foray interception
+- [x] Withdrawal bot
+- [x] Crontab : engine
 - [x] Export/import all messages
 - [x] Test HTML in message
 - [x] Update auction from broadcast

bot.go (1)

@@ -880,6 +880,7 @@ func botGDepositAll(m *tb.Message) {
     }
     p.ResObjID64 = append(p.ResObjID64, getObjItemID(`03`, `Pelt`))
     p.ResObjID64 = append(p.ResObjID64, getObjItemID(`04`, `Bone`))
+    p.ResObjID64 = append(p.ResObjID64, getObjItemID(`05`, `Coal`))
     p.ResObjID64 = append(p.ResObjID64, getObjItemID(`07`, `Powder`))
     p.ResObjID64 = append(p.ResObjID64, getObjItemID(`08`, `Iron Ore`))
     p.ResObjID64 = append(p.ResObjID64, getObjItemID(`09`, `Cloth`))


@@ -34,6 +34,21 @@
         "name": "Guild withdraw conf ack",
         "obj_type": "msg"
     },
+    {
+        "intl_id": "msg_g_inspect_req",
+        "name": "Guild inspect req",
+        "obj_type": "msg"
+    },
+    {
+        "intl_id": "msg_inspect_ack",
+        "name": "Item inspect ack",
+        "obj_type": "msg"
+    },
+    {
+        "intl_id": "msg_invalid_action",
+        "name": "Invalid action",
+        "obj_type": "msg"
+    },
     {
         "intl_id": "msg_war",
         "name": "War report",

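The three message object types added above are plain data entries in the same shape as the existing ones. Nothing in this commit shows the code that loads this list, so the following is only a minimal sketch, assuming a hypothetical ObjType struct and file name, of how such a list decodes in Go:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// ObjType mirrors one entry of the obj-type list above.
// Struct name, field names and file name are illustrative assumptions.
type ObjType struct {
	IntlID  string `json:"intl_id"`
	Name    string `json:"name"`
	ObjType string `json:"obj_type"`
}

func main() {
	data, err := os.ReadFile("obj_types.json") // hypothetical file name
	if err != nil {
		panic(err)
	}
	var types []ObjType
	if err := json.Unmarshal(data, &types); err != nil {
		panic(err)
	}
	for _, t := range types {
		fmt.Printf("%s -> %s (%s)\n", t.IntlID, t.Name, t.ObjType)
	}
}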

@@ -1830,5 +1830,29 @@
         "msg_type": "msg_ny2020_battle",
         "chat_id": -1001198527605,
         "user_id": 841616455
+    },
+    {
+        "prio": 5000,
+        "descn": "Invalid action",
+        "rule": "^\\[invalid action\\]$",
+        "msg_type": "msg_invalid_action",
+        "chat_id": 0,
+        "user_id": 0
+    },
+    {
+        "prio": 5000,
+        "descn": "Guild inspect req",
+        "rule": "^/g_inspect_(?P<Item>[a-z0-9]+)$",
+        "msg_type": "msg_g_inspect_req",
+        "chat_id": 0,
+        "user_id": 0
+    },
+    {
+        "prio": 5000,
+        "descn": "Item inspect ack",
+        "rule": "^Item: (?P<Name>.*)$",
+        "msg_type": "msg_inspect_ack",
+        "chat_id": 0,
+        "user_id": 0
     }
 ]

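These rules rely on Go named capture groups, and the parser added in msg.go further down extracts the captured text with Regexp.ReplaceAllString and a ${group} reference, so the group name in a rule and the reference used by its parser have to match. A small self-contained sketch of that mechanism, reusing the two patterns added above (the sample messages are invented):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as the two rules added above.
	reqRule := regexp.MustCompile(`^/g_inspect_(?P<Item>[a-z0-9]+)$`)
	ackRule := regexp.MustCompile(`^Item: (?P<Name>.*)$`)

	// Extract the item code from an inspect request.
	if m := "/g_inspect_u123"; reqRule.MatchString(m) {
		fmt.Println(reqRule.ReplaceAllString(m, "${Item}")) // u123
	}

	// Extract the item name from an inspect acknowledgement.
	if m := "Item: Hunter Armor"; ackRule.MatchString(m) {
		fmt.Println(ackRule.ReplaceAllString(m, "${Name}")) // Hunter Armor
	}
}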
def.go (6)

@@ -334,6 +334,10 @@ type ChatWarsMessageTimeAck struct {
     WeatherNext string `json:"weather_next"`
 }
 
+type ChatWarsMessageInspectAck struct {
+    Name string `json:"name"`
+}
+
 type ChatWarsMessageJobGWithdrawAck struct {
     Msg *ChatWarsMessage `json:"msg"`
     Ref string `json:"ref"`
@@ -408,6 +412,7 @@ type JobPayloadGWithdrawItem struct {
     Name string `json:"name"`
     Available int64 `json:"available"`
     Required int64 `json:"required"`
+    Inspect bool `json:"inspect"`
 }
 
 type JobPayloadGWithdraw struct {
@@ -418,6 +423,7 @@ type JobPayloadGWithdraw struct {
     Status int64 `json:"status"`
     CleanupMsg []ChatWarsMessage `json:"cleanup_msg"`
     Validated bool `json:"validated"`
+    Inspecting string `json:"inspecting"`
 }
 
 type JobPayloadGDeposit struct {

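The new Inspect and Inspecting fields carry the unique-item inspection state across job re-creations, since the withdraw payload is serialised to JSON between runs of jobGWithdraw. A minimal round-trip sketch using trimmed copies of the structs above, limited to the fields visible in this diff (the Items field and its JSON tag are assumed from the p.Items usage in job.go):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the payload structs from def.go,
// restricted to the fields shown in this commit.
type withdrawItem struct {
	Name      string `json:"name"`
	Available int64  `json:"available"`
	Required  int64  `json:"required"`
	Inspect   bool   `json:"inspect"`
}

type withdrawPayload struct {
	Status     int64          `json:"status"`
	Validated  bool           `json:"validated"`
	Inspecting string         `json:"inspecting"`
	Items      []withdrawItem `json:"items"` // assumed field, used as p.Items in job.go
}

func main() {
	p := withdrawPayload{
		Inspecting: "u123", // unique item currently being inspected
		Items:      []withdrawItem{{Required: 1, Inspect: true}},
	}
	b, _ := json.Marshal(p)

	var q withdrawPayload
	_ = json.Unmarshal(b, &q)
	fmt.Println(q.Inspecting, q.Items[0].Inspect) // u123 true
}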
job.go (152)

@@ -23,12 +23,23 @@ import (
 )
 
 func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64, schedule time.Time, payload []byte) (int64, error) {
-    var zb bytes.Buffer
+    var (
+        zb bytes.Buffer
+        zpayload []byte
+        zipped int
+    )
 
-    zw := zlib.NewWriter(&zb)
-    zw.Write(payload)
-    zw.Close()
-    zpayload := zb.Bytes()
+    if len(payload) > 10000 {
+        zw := zlib.NewWriter(&zb)
+        zw.Write(payload)
+        zw.Close()
+        zpayload = zb.Bytes()
+        zipped = 1
+    } else {
+        zpayload = payload
+        zipped = 0
+    }
 
     if len(zpayload) > 20000 {
         return 0, errors.New("payload too long")
@@ -55,15 +66,15 @@ func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64, schedule time.Time, payload []byte) (int64, error) {
         return 0, err
     }
 
-    stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, payload)
-        VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?);`)
+    stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, zipped, payload)
+        VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?, ?);`)
     logOnError(err, "createJob : prepare insert obj_job")
     if err != nil {
         return 0, err
     }
     defer stmt.Close()
 
-    _, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zpayload)
+    _, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zipped, zpayload)
     logOnError(err, "createJob : insert obj_job")
     if err != nil {
         return 0, err
@@ -145,8 +156,13 @@ func setJobPayload(jobID64 int64, payload []byte) error {
         return errors.New("payload too long")
     }
 
+<<<<<<< HEAD
     stmt, err := db.Prepare(`UPDATE obj_job j SET j.payload = ? WHERE j.obj_id = ?;`)
     logOnError(err, "setJobPayload : prepare update obj_job")
+=======
+    stmt, err := db.Prepare(`UPDATE obj_job j SET j.payload = ?, j.zipped = 1 WHERE j.obj_id = ?;`)
+    logOnError(err, "setJobTimeout : prepare update obj_job")
+>>>>>>> refs/remotes/origin/master
     if err != nil {
         return err
     }
@@ -227,6 +243,7 @@ func loadCurrentJobs() ([]Job, error) {
         userID64 int64
         trigger int64
         timeout time.Time
+        zipped int
         zpayload []byte
         jobs []Job
     )
@@ -240,7 +257,7 @@ func loadCurrentJobs() ([]Job, error) {
         return jobs, err
     }
 
-    stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
+    stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.zipped, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
     logOnError(err, "loadCurrentJobs : prepare select statement")
     if err != nil {
         stmt.Close()
@@ -256,9 +273,11 @@ func loadCurrentJobs() ([]Job, error) {
     }
 
     for rows.Next() {
-        err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zpayload, &timeout)
+        err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zipped, &zpayload, &timeout)
         logOnError(err, "loadCurrentJobs : scan query rows")
 
+        var payload []byte
+        if zipped > 0 {
             zb := bytes.NewReader(zpayload)
             zr, err := zlib.NewReader(zb)
             if err != nil {
@@ -267,7 +286,10 @@ func loadCurrentJobs() ([]Job, error) {
             }
             b := new(bytes.Buffer)
             b.ReadFrom(zr)
-            payload := b.Bytes()
+            payload = b.Bytes()
+        } else {
+            payload = zpayload
+        }
 
         job := Job{
             ID64: objId,
@@ -1445,12 +1467,13 @@ func jobGWithdraw(j Job) {
     logOnError(err, "jobGWithdraw : Unmarshal payload")
 
     if p.Status == 0 {
-        for _, item := range p.Items {
+        for k, item := range p.Items {
             id := getSilentObjItemID(item.Code, ``)
             if id != 0 {
                 obj, _ := getObjItem(id)
                 p.Status = p.Status | reqTab[obj.ItemTypeID]
             } else if ok, _ := regexp.MatchString(`^u[0-9]+$`, item.Code); ok {
+                p.Items[k].Inspect = true
                 p.Status = p.Status | reqTab[cacheObjSubType[`item_other`]]
             }
         }
@@ -1480,6 +1503,7 @@ func jobGWithdraw(j Job) {
     logOnError(err, "jobGWithdraw : getObjSubType("+strconv.FormatInt(j.Trigger, 10)+")")
     if err == nil {
         if id == cacheObjSubType[`msg_gstock_any_ack`] {
+            var isUnique bool
             m, err := getObjMsg(j.Trigger)
             logOnError(err, "jobGWithdraw : getObjMsg")
             rule, err := getMsgParsingRule(m)
@@ -1492,20 +1516,124 @@ func jobGWithdraw(j Job) {
                     if req.Code == disp.Code {
                         p.Items[k].Available = disp.Quantity
                         p.Items[k].Name = disp.Name
+                        p.Items[k].Inspect = false
                         log.Printf("jobGWithdraw[%d] : Found %s - %s : %d.\n", j.ID64, disp.Code, disp.Name, disp.Quantity)
                     }
+                    if ok, _ := regexp.MatchString(`^u[0-9]+$`, disp.Code); ok {
+                        isUnique = true
+                    }
                 }
             }
+            p.CleanupMsg = append(p.CleanupMsg, *m)
+            if isUnique {
+                for _, req := range p.Items {
+                    if req.Inspect {
                         p2 := JobPayloadMsgDel{
+                            MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
                             Delay: (10 * time.Second),
-                            ObjMsgID64: j.Trigger,
+                            ObjMsgID64: 0,
                         }
                         b2, _ := json.Marshal(p2)
-                        createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().UTC(), b2)
+                        createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
+                        p.Inspecting = req.Code
+                        b, _ := json.Marshal(p)
+                        id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
+                        logOnError(err, "jobGWithdraw : createJob")
+                        setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
+                        setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
+                        clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
+                        err = setJobDone(j.ID64)
+                        logOnError(err, "jobGWithdraw : setJobDone")
+                        return
+                    }
+                }
+            }
-        }
+        } else if id == cacheObjSubType[`msg_inspect_ack`] {
+            m, err := getObjMsg(j.Trigger)
+            logOnError(err, "jobGWithdraw : getObjMsg")
+            rule, err := getMsgParsingRule(m)
+            logOnError(err, "jobGWithdraw : getMsgParsingRule")
+            cwm, err := parseSubTypeMessageInspectAck(m, rule.re)
+            logOnError(err, "jobGWithdraw : parseSubTypeMessageInspectAck")
+            for k, req := range p.Items {
+                if req.Code == p.Inspecting {
+                    p.Items[k].Available = 1
+                    p.Items[k].Name = cwm.Name
+                    p.Items[k].Inspect = false
+                    break
+                }
+            }
+            p.Inspecting = ``
+            p.CleanupMsg = append(p.CleanupMsg, *m)
+            for _, req := range p.Items {
+                if req.Inspect {
+                    p2 := JobPayloadMsgDel{
+                        MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
+                        Delay: (10 * time.Second),
+                        ObjMsgID64: 0,
+                    }
+                    b2, _ := json.Marshal(p2)
+                    createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
+                    p.Inspecting = req.Code
+                    b, _ := json.Marshal(p)
+                    id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
+                    logOnError(err, "jobGWithdraw : createJob")
+                    setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
+                    setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
+                    clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
+                    err = setJobDone(j.ID64)
+                    logOnError(err, "jobGWithdraw : setJobDone")
+                    return
+                }
+            }
+        } else if id == cacheObjSubType[`msg_invalid_action`] {
+            for k, req := range p.Items {
+                if req.Code == p.Inspecting {
+                    p.Items[k].Available = 1
+                    p.Items[k].Inspect = false
+                    break
+                }
+            }
+            p.Inspecting = ``
+            m, err := getObjMsg(j.Trigger)
+            logOnError(err, "jobGWithdraw : getObjMsg")
+            if err == nil {
+                p.CleanupMsg = append(p.CleanupMsg, *m)
+            }
+            for _, req := range p.Items {
+                if req.Inspect {
+                    p2 := JobPayloadMsgDel{
+                        MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
+                        Delay: (10 * time.Second),
+                        ObjMsgID64: 0,
+                    }
+                    b2, _ := json.Marshal(p2)
+                    createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
+                    p.Inspecting = req.Code
+                    b, _ := json.Marshal(p)
+                    id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
+                    logOnError(err, "jobGWithdraw : createJob")
+                    setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
+                    setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
+                    clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
+                    err = setJobDone(j.ID64)
+                    logOnError(err, "jobGWithdraw : setJobDone")
+                    return
+                }
+            }
-        if id == cacheObjSubType[`msg_msg_job_gwithdraw_ack`] {
+        } else if id == cacheObjSubType[`msg_msg_job_gwithdraw_ack`] {
             m, err := getObjMsg(j.Trigger)
             logOnError(err, "jobGWithdraw : getObjMsg")
             rule, err := getMsgParsingRule(m)

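The createJob and loadCurrentJobs changes above compress a payload only when it exceeds 10000 bytes and record that choice in the new zipped column, so the loaders know whether to inflate. A self-contained sketch of that round trip, with the threshold copied from the diff and the helper names purely illustrative:

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

// pack mirrors the createJob logic: compress only large payloads
// and return a flag telling the loader which form was stored.
func pack(payload []byte) (stored []byte, zipped int) {
	if len(payload) > 10000 {
		var zb bytes.Buffer
		zw := zlib.NewWriter(&zb)
		zw.Write(payload)
		zw.Close()
		return zb.Bytes(), 1
	}
	return payload, 0
}

// unpack mirrors loadCurrentJobs/loadObjJob: inflate only when zipped is set.
func unpack(stored []byte, zipped int) ([]byte, error) {
	if zipped == 0 {
		return stored, nil
	}
	zr, err := zlib.NewReader(bytes.NewReader(stored))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	small := []byte(`{"status":0}`)
	stored, zipped := pack(small)
	out, _ := unpack(stored, zipped)
	fmt.Println(zipped, string(out)) // 0 {"status":0}

	large := bytes.Repeat([]byte("x"), 20000)
	stored, zipped = pack(large)
	out, _ = unpack(stored, zipped)
	fmt.Println(zipped, len(stored) < len(large), len(out)) // 1 true 20000
}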
msg.go (7)

@@ -27,6 +27,13 @@ func getMsgParsingRule(m *ChatWarsMessage) (*MessageParsingRule, error) {
     }
 }
 
+func parseSubTypeMessageInspectAck(m *ChatWarsMessage, r *regexp.Regexp) (*ChatWarsMessageInspectAck, error) {
+    cwm := ChatWarsMessageInspectAck{}
+    cwm.Name = r.ReplaceAllString(m.Text, "${Name}")
+    log.Printf("parseSubTypeMessageInspectAck : Name : %s\n", cwm.Name)
+    return &cwm, nil
+}
+
 func parseSubTypeMessageExchangeAck(m *ChatWarsMessage, r *regexp.Regexp) (*ChatWarsMessageExchangeAck, error) {
     cwm := ChatWarsMessageExchangeAck{}
     deals := []ChatWarsExchangeDeal{}

obj.go (9)

@@ -775,20 +775,21 @@ func loadObjJob() error {
         timeout time.Time
         user int64
         zpayload []byte
+        zipped int
     )
 
     muxObjJob.Lock()
     cacheObjJob = make(map[int64]Job)
     muxObjJob.Unlock()
 
-    jobs, err := db.Query(`SELECT o.id, o.obj_sub_type_id, oj.trigger_id, oj.timeout, oj.user_id, oj.payload FROM obj o, obj_job oj WHERE o.id = oj.obj_id;;`)
+    jobs, err := db.Query(`SELECT o.id, o.obj_sub_type_id, oj.trigger_id, oj.timeout, oj.user_id, oj.zipped, oj.payload FROM obj o, obj_job oj WHERE o.id = oj.obj_id;;`)
     if err != nil {
         return err
     }
     defer jobs.Close()
 
     for jobs.Next() {
-        err = jobs.Scan(&id, &type_id, &trigger, &timeout, &user, &zpayload)
+        err = jobs.Scan(&id, &type_id, &trigger, &timeout, &user, &zipped, &zpayload)
         if err != nil {
             return err
         }
@@ -799,6 +800,7 @@ func loadObjJob() error {
         j.Timeout = timeout
         j.UserID64 = user
 
+        if zipped > 0 {
             zb := bytes.NewReader(zpayload)
             zr, err := zlib.NewReader(zb)
             if err != nil {
@@ -809,6 +811,9 @@ func loadObjJob() error {
             b.ReadFrom(zr)
             payload := b.Bytes()
             j.Payload = payload
+        } else {
+            j.Payload = zpayload
+        }
 
         muxObjJob.Lock()
         cacheObjJob[id] = *j

sql.go (1)

@@ -406,6 +406,7 @@ func initDB() {
         ,started TIMESTAMP
         ,ended TIMESTAMP
         ,timeout TIMESTAMP
+        ,zipped TINYINT NOT NULL
         ,payload VARBINARY(20000)
         ,FOREIGN KEY (obj_id) REFERENCES obj(id) ON DELETE CASCADE
         ,KEY (is_done)
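
The new zipped column only appears in the CREATE TABLE statement above, so a database initialised before this commit would still lack it and the extended INSERT in createJob would fail against it. A hypothetical one-off migration, not part of this commit, written against the package-level db handle and logOnError helper used elsewhere in the diff:

// migrateObjJobZipped adds the new column to an obj_job table that predates
// this commit. Hypothetical helper; the repository's actual upgrade path is
// not shown in the diff.
func migrateObjJobZipped() error {
	_, err := db.Exec(`ALTER TABLE obj_job ADD COLUMN zipped TINYINT NOT NULL DEFAULT 0 AFTER timeout;`)
	logOnError(err, "migrateObjJobZipped : alter obj_job")
	return err
}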