shoopea 2020-01-26 14:42:44 +08:00
commit 2f168b12e9
9 changed files with 230 additions and 42 deletions


@ -6,7 +6,6 @@ ChirpNestBot
- [ ] Make sure refresh message receives the message later (jobMsgRefresh)
- [ ] Adjust /time clock for auctions/... (delay between cw and real time ?)
- [ ] Update old auctions with client
- [ ] Convert config to json and insert sql structure in it
- [ ] Add metrics
- [ ] Eliminate cache map race conditions (all maps with read/write) and use redis ?
- [ ] Transform import/export to jobs, feed msg to identification channel instead of inserting directly
@ -15,8 +14,6 @@ ChirpNestBot
- [ ] Update items parsing/identification with gear details (atk/def/mana)
- [ ] Auto-identify weight of items
- [ ] Tribute interception
- [ ] Withdrawal bot
- [x] Crontab : engine
- [ ] Crontab : user interface
- [ ] Resources hiding
- [ ] Resources auto destroy
@ -27,8 +24,12 @@ ChirpNestBot
- [ ] Impersonate
- [ ] Link TelegramUserID and UserID (can use historic auction messages)
- [ ] Issue with Squire in the /g_roles ?
- [x] Foray interception
- [ ] Handle Foray timeout to stop spamming in case something goes bad
- [ ] Insert sql structure in config ?
- [x] Convert config to json
- [x] Foray interception
- [x] Withdrawal bot
- [x] Crontab : engine
- [x] Export/import all messages
- [x] Test HTML in message
- [x] Update auction from broadcast

1
bot.go

@ -880,6 +880,7 @@ func botGDepositAll(m *tb.Message) {
}
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`03`, `Pelt`))
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`04`, `Bone`))
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`05`, `Coal`))
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`07`, `Powder`))
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`08`, `Iron Ore`))
p.ResObjID64 = append(p.ResObjID64, getObjItemID(`09`, `Cloth`))


@ -34,6 +34,21 @@
"name": "Guild withdraw conf ack",
"obj_type": "msg"
},
{
"intl_id": "msg_g_inspect_req",
"name": "Guild inspect reg",
"obj_type": "msg"
},
{
"intl_id": "msg_inspect_ack",
"name": "Item inspect ack",
"obj_type": "msg"
},
{
"intl_id": "msg_invalid_action",
"name": "Item inspect ack",
"obj_type": "msg"
},
{
"intl_id": "msg_war",
"name": "War report",


@ -1830,5 +1830,29 @@
"msg_type": "msg_ny2020_battle",
"chat_id": -1001198527605,
"user_id": 841616455
},
{
"prio": 5000,
"descn": "Invalid action",
"rule": "^\\[invalid action\\]$",
"msg_type": "msg_invalid_action",
"chat_id": 0,
"user_id": 0
},
{
"prio": 5000,
"descn": "Guild inspect req",
"rule": "^/g_inspect_(?P<Item>[a-z0-9]+)$",
"msg_type": "msg_g_inspect_req",
"chat_id": 0,
"user_id": 0
},
{
"prio": 5000,
"descn": "Item inspect ack",
"rule": "^Item: (?P<Name>.*)$",
"msg_type": "msg_inspect_ack",
"chat_id": 0,
"user_id": 0
}
]
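
Note: both new rules rely on Go named capture groups, which the parseSubTypeMessage* helpers extract via ReplaceAllString. A minimal, self-contained sketch of that mechanism (variable names and inputs are made-up examples, not real game messages):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as the two rules added above.
	reInspectReq := regexp.MustCompile(`^/g_inspect_(?P<Item>[a-z0-9]+)$`)
	reInspectAck := regexp.MustCompile(`^Item: (?P<Name>.*)$`)

	// Because both patterns are anchored, ReplaceAllString on a matching
	// message yields just the named group's value.
	fmt.Println(reInspectReq.ReplaceAllString("/g_inspect_u123", "${Item}"))     // u123
	fmt.Println(reInspectAck.ReplaceAllString("Item: Hunter Dagger", "${Name}")) // Hunter Dagger
}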

6
def.go

@ -334,6 +334,10 @@ type ChatWarsMessageTimeAck struct {
WeatherNext string `json:"weather_next"`
}
type ChatWarsMessageInspectAck struct {
Name string `json:"name"`
}
type ChatWarsMessageJobGWithdrawAck struct {
Msg *ChatWarsMessage `json:"msg"`
Ref string `json:"ref"`
@ -408,6 +412,7 @@ type JobPayloadGWithdrawItem struct {
Name string `json:"name"`
Available int64 `json:"available"`
Required int64 `json:"required"`
Inspect bool `json:"inspect"`
}
type JobPayloadGWithdraw struct {
@ -418,6 +423,7 @@ type JobPayloadGWithdraw struct {
Status int64 `json:"status"`
CleanupMsg []ChatWarsMessage `json:"cleanup_msg"`
Validated bool `json:"validated"`
Inspecting string `json:"inspecting"`
}
type JobPayloadGDeposit struct {

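The two new fields drive the inspect detour in job.go below: items whose code matches ^u[0-9]+$ (a unique item) get Inspect set, and Inspecting records the code currently waiting for its /g_inspect_ reply. A small self-contained sketch with trimmed mirror structs (struct names and field set are assumed for illustration, not copied from def.go):

package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

// Trimmed mirrors of JobPayloadGWithdrawItem / JobPayloadGWithdraw, for illustration only.
type withdrawItem struct {
	Code    string `json:"code"`
	Inspect bool   `json:"inspect"`
}

type withdrawPayload struct {
	Items      []withdrawItem `json:"items"`
	Inspecting string         `json:"inspecting"`
}

func main() {
	p := withdrawPayload{Items: []withdrawItem{{Code: "u123"}, {Code: "01"}}}
	reUnique := regexp.MustCompile(`^u[0-9]+$`)
	for k, it := range p.Items {
		if reUnique.MatchString(it.Code) {
			p.Items[k].Inspect = true // name unknown: must be fetched via /g_inspect_<code>
		}
	}
	p.Inspecting = p.Items[0].Code // code currently awaiting msg_inspect_ack / msg_invalid_action
	b, _ := json.Marshal(p)
	fmt.Println(string(b))
}
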
182
job.go

@ -23,12 +23,23 @@ import (
)
func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64, schedule time.Time, payload []byte) (int64, error) {
var zb bytes.Buffer
zw := zlib.NewWriter(&zb)
zw.Write(payload)
zw.Close()
zpayload := zb.Bytes()
var (
zb bytes.Buffer
zpayload []byte
zipped int
)
if len(payload) > 10000 {
zw := zlib.NewWriter(&zb)
zw.Write(payload)
zw.Close()
zpayload = zb.Bytes()
zipped = 1
} else {
zpayload = payload
zipped = 0
}
if len(zpayload) > 20000 {
return 0, errors.New("payload too long")
@ -55,15 +66,15 @@ func createJob(jobTypeID64 int64, priority int32, userID64 int64, trigger int64,
return 0, err
}
stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, payload)
VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?);`)
stmt, err = db.Prepare(`INSERT INTO obj_job (obj_id, priority, user_id, trigger_id, seq_nr, schedule, is_done, in_work, inserted, timeout, pulled, started, ended, zipped, payload)
VALUES (?, ?, ?, ?, NULL, ?, 0, 0, ?, ?, NULL, NULL, NULL, ?, ?);`)
logOnError(err, "createJob : prepare insert obj_job")
if err != nil {
return 0, err
}
defer stmt.Close()
_, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zpayload)
_, err = stmt.Exec(objId, priority, userID64, trigger, schedule.UTC(), time.Now().UTC(), time.Unix(maxUnixTimestamp, 0).UTC(), zipped, zpayload)
logOnError(err, "createJob : insert obj_job")
if err != nil {
return 0, err
@ -145,8 +156,13 @@ func setJobPayload(jobID64 int64, payload []byte) error {
return errors.New("payload too long")
}
// setJobPayload stores the payload exactly as received (no compression
// happens here), so keep the zipped flag consistent with that.
stmt, err := db.Prepare(`UPDATE obj_job j SET j.payload = ?, j.zipped = 0 WHERE j.obj_id = ?;`)
logOnError(err, "setJobPayload : prepare update obj_job")
if err != nil {
return err
}
@ -227,6 +243,7 @@ func loadCurrentJobs() ([]Job, error) {
userID64 int64
trigger int64
timeout time.Time
zipped int
zpayload []byte
jobs []Job
)
@ -240,7 +257,7 @@ func loadCurrentJobs() ([]Job, error) {
return jobs, err
}
stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
stmt, err := db.Prepare("SELECT o.id, o.obj_sub_type_id, j.trigger_id, j.user_id, j.zipped, j.payload, j.timeout FROM obj_job j, obj o WHERE j.obj_id = o.id AND j.is_done = 0 AND j.in_work = 1 AND j.seq_nr = ? ORDER BY j.priority ASC, j.obj_id ASC;")
logOnError(err, "loadCurrentJobs : prepare select statement")
if err != nil {
stmt.Close()
@ -256,18 +273,23 @@ func loadCurrentJobs() ([]Job, error) {
}
for rows.Next() {
err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zpayload, &timeout)
err = rows.Scan(&objId, &jobTypeID64, &trigger, &userID64, &zipped, &zpayload, &timeout)
logOnError(err, "loadCurrentJobs : scan query rows")
zb := bytes.NewReader(zpayload)
zr, err := zlib.NewReader(zb)
if err != nil {
logOnError(err, "loadCurrentJobs : zlib.NewReader")
continue
var payload []byte
if zipped > 0 {
zb := bytes.NewReader(zpayload)
zr, err := zlib.NewReader(zb)
if err != nil {
logOnError(err, "loadCurrentJobs : zlib.NewReader")
continue
}
b := new(bytes.Buffer)
b.ReadFrom(zr)
payload = b.Bytes()
} else {
payload = zpayload
}
b := new(bytes.Buffer)
b.ReadFrom(zr)
payload := b.Bytes()
job := Job{
ID64: objId,
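
Write and read side now share one convention: payloads over 10000 bytes are zlib-compressed and flagged zipped = 1, smaller ones are stored raw with zipped = 0. A self-contained round-trip sketch of that convention (thresholds copied from createJob above; the helper names pack/unpack are mine, not from the repo):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

// pack mirrors createJob: compress only when the payload is large.
func pack(payload []byte) (stored []byte, zipped int) {
	if len(payload) > 10000 {
		var zb bytes.Buffer
		zw := zlib.NewWriter(&zb)
		zw.Write(payload)
		zw.Close()
		return zb.Bytes(), 1
	}
	return payload, 0
}

// unpack mirrors loadCurrentJobs / loadObjJob: decompress only when flagged.
func unpack(stored []byte, zipped int) ([]byte, error) {
	if zipped == 0 {
		return stored, nil
	}
	zr, err := zlib.NewReader(bytes.NewReader(stored))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	small := []byte(`{"status":0}`)
	stored, zipped := pack(small)
	back, _ := unpack(stored, zipped)
	fmt.Println(zipped, string(back)) // 0 {"status":0}
}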
@ -1445,12 +1467,13 @@ func jobGWithdraw(j Job) {
logOnError(err, "jobGWithdraw : Unmarshal payload")
if p.Status == 0 {
for _, item := range p.Items {
for k, item := range p.Items {
id := getSilentObjItemID(item.Code, ``)
if id != 0 {
obj, _ := getObjItem(id)
p.Status = p.Status | reqTab[obj.ItemTypeID]
} else if ok, _ := regexp.MatchString(`^u[0-9]+$`, item.Code); ok {
p.Items[k].Inspect = true
p.Status = p.Status | reqTab[cacheObjSubType[`item_other`]]
}
}
@ -1480,6 +1503,7 @@ func jobGWithdraw(j Job) {
logOnError(err, "jobGWithdraw : getObjSubType("+strconv.FormatInt(j.Trigger, 10)+")")
if err == nil {
if id == cacheObjSubType[`msg_gstock_any_ack`] {
var isUnique bool
m, err := getObjMsg(j.Trigger)
logOnError(err, "jobGWithdraw : getObjMsg")
rule, err := getMsgParsingRule(m)
@ -1492,20 +1516,124 @@ func jobGWithdraw(j Job) {
if req.Code == disp.Code {
p.Items[k].Available = disp.Quantity
p.Items[k].Name = disp.Name
p.Items[k].Inspect = false
log.Printf("jobGWithdraw[%d] : Found %s - %s : %d.\n", j.ID64, disp.Code, disp.Name, disp.Quantity)
}
if ok, _ := regexp.MatchString(`^u[0-9]+$`, disp.Code); ok {
isUnique = true
}
}
}
p2 := JobPayloadMsgDel{
Delay: (10 * time.Second),
ObjMsgID64: j.Trigger,
}
b2, _ := json.Marshal(p2)
createJob(cacheObjSubType[`job_msg_del`], objJobPriority, j.UserID64, 0, time.Now().UTC(), b2)
}
p.CleanupMsg = append(p.CleanupMsg, *m)
if id == cacheObjSubType[`msg_msg_job_gwithdraw_ack`] {
if isUnique {
for _, req := range p.Items {
if req.Inspect {
p2 := JobPayloadMsgDel{
MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
Delay: (10 * time.Second),
ObjMsgID64: 0,
}
b2, _ := json.Marshal(p2)
createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
p.Inspecting = req.Code
b, _ := json.Marshal(p)
id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
logOnError(err, "jobGWithdraw : createJob")
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
err = setJobDone(j.ID64)
logOnError(err, "jobGWithdraw : setJobDone")
return
}
}
}
} else if id == cacheObjSubType[`msg_inspect_ack`] {
m, err := getObjMsg(j.Trigger)
logOnError(err, "jobGWithdraw : getObjMsg")
rule, err := getMsgParsingRule(m)
logOnError(err, "jobGWithdraw : getMsgParsingRule")
cwm, err := parseSubTypeMessageInspectAck(m, rule.re)
logOnError(err, "jobGWithdraw : parseSubTypeMessageInspectAck")
for k, req := range p.Items {
if req.Code == p.Inspecting {
p.Items[k].Available = 1
p.Items[k].Name = cwm.Name
p.Items[k].Inspect = false
break
}
}
p.Inspecting = ``
p.CleanupMsg = append(p.CleanupMsg, *m)
for _, req := range p.Items {
if req.Inspect {
p2 := JobPayloadMsgDel{
MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
Delay: (10 * time.Second),
ObjMsgID64: 0,
}
b2, _ := json.Marshal(p2)
createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
p.Inspecting = req.Code
b, _ := json.Marshal(p)
id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
logOnError(err, "jobGWithdraw : createJob")
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
err = setJobDone(j.ID64)
logOnError(err, "jobGWithdraw : setJobDone")
return
}
}
} else if id == cacheObjSubType[`msg_invalid_action`] {
for k, req := range p.Items {
if req.Code == p.Inspecting {
p.Items[k].Available = 1
p.Items[k].Inspect = false
break
}
}
p.Inspecting = ``
m, err := getObjMsg(j.Trigger)
logOnError(err, "jobGWithdraw : getObjMsg")
if err == nil {
p.CleanupMsg = append(p.CleanupMsg, *m)
}
for _, req := range p.Items {
if req.Inspect {
p2 := JobPayloadMsgDel{
MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
Delay: (10 * time.Second),
ObjMsgID64: 0,
}
b2, _ := json.Marshal(p2)
createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
p.Inspecting = req.Code
b, _ := json.Marshal(p)
id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
logOnError(err, "jobGWithdraw : createJob")
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
err = setJobDone(j.ID64)
logOnError(err, "jobGWithdraw : setJobDone")
return
}
}
} else if id == cacheObjSubType[`msg_msg_job_gwithdraw_ack`] {
m, err := getObjMsg(j.Trigger)
logOnError(err, "jobGWithdraw : getObjMsg")
rule, err := getMsgParsingRule(m)

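The block that queues the next /g_inspect_ lookup appears verbatim in three branches of jobGWithdraw above; a possible helper to factor it out, sketched against the package-level types and functions job.go already uses (the name dispatchNextInspect is mine, and this is not part of this commit):

// dispatchNextInspect finds the next item still flagged for inspection,
// re-queues the withdraw job with its two reply callbacks, and sends
// /g_inspect_<code>. It reports whether such an item was found.
// Sketch only: it relies on the package-level helpers (createJob,
// createJobCallback, setJobCallback, clientSendCWMsg, setJobDone,
// logOnError) exactly as they are called above.
func dispatchNextInspect(j Job, p *JobPayloadGWithdraw) bool {
	for _, req := range p.Items {
		if !req.Inspect {
			continue
		}
		p2 := JobPayloadMsgDel{
			MsgTypeID64: cacheObjSubType[`msg_g_inspect_req`],
			Delay:       (10 * time.Second),
			ObjMsgID64:  0,
		}
		b2, _ := json.Marshal(p2)
		createJobCallback(cacheObjSubType[`job_msg_del`], j.UserID64, p2.MsgTypeID64, b2, time.Minute)
		p.Inspecting = req.Code
		b, _ := json.Marshal(p)
		id, err := createJob(cacheObjSubType[`job_gwithdraw`], objJobPriority, j.UserID64, 0, time.Unix(maxUnixTimestamp, 0).UTC(), b)
		logOnError(err, "jobGWithdraw : createJob")
		setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_invalid_action`])
		setJobCallback(id, int64(bot.Me.ID), cacheObjSubType[`msg_inspect_ack`])
		clientSendCWMsg(j.UserID64, fmt.Sprintf("/g_inspect_%s", p.Inspecting))
		err = setJobDone(j.ID64)
		logOnError(err, "jobGWithdraw : setJobDone")
		return true
	}
	return false
}

Each of the three call sites would then reduce to: if dispatchNextInspect(j, &p) { return }.
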
7
msg.go

@ -27,6 +27,13 @@ func getMsgParsingRule(m *ChatWarsMessage) (*MessageParsingRule, error) {
}
}
func parseSubTypeMessageInspectAck(m *ChatWarsMessage, r *regexp.Regexp) (*ChatWarsMessageInspectAck, error) {
cwm := ChatWarsMessageInspectAck{}
cwm.Name = r.ReplaceAllString(m.Text, "${Name}")
log.Printf("parseSubTypeMessageInspectAck : Name : %s\n", cwm.Name)
return &cwm, nil
}
func parseSubTypeMessageExchangeAck(m *ChatWarsMessage, r *regexp.Regexp) (*ChatWarsMessageExchangeAck, error) {
cwm := ChatWarsMessageExchangeAck{}
deals := []ChatWarsExchangeDeal{}

27
obj.go

@ -775,20 +775,21 @@ func loadObjJob() error {
timeout time.Time
user int64
zpayload []byte
zipped int
)
muxObjJob.Lock()
cacheObjJob = make(map[int64]Job)
muxObjJob.Unlock()
jobs, err := db.Query(`SELECT o.id, o.obj_sub_type_id, oj.trigger_id, oj.timeout, oj.user_id, oj.payload FROM obj o, obj_job oj WHERE o.id = oj.obj_id;;`)
jobs, err := db.Query(`SELECT o.id, o.obj_sub_type_id, oj.trigger_id, oj.timeout, oj.user_id, oj.zipped, oj.payload FROM obj o, obj_job oj WHERE o.id = oj.obj_id;`)
if err != nil {
return err
}
defer jobs.Close()
for jobs.Next() {
err = jobs.Scan(&id, &type_id, &trigger, &timeout, &user, &zpayload)
err = jobs.Scan(&id, &type_id, &trigger, &timeout, &user, &zipped, &zpayload)
if err != nil {
return err
}
@ -799,16 +800,20 @@ func loadObjJob() error {
j.Timeout = timeout
j.UserID64 = user
zb := bytes.NewReader(zpayload)
zr, err := zlib.NewReader(zb)
if err != nil {
logOnError(err, "loadObjJob : zlib.NewReader")
continue
if zipped > 0 {
zb := bytes.NewReader(zpayload)
zr, err := zlib.NewReader(zb)
if err != nil {
logOnError(err, "loadObjJob : zlib.NewReader")
continue
}
b := new(bytes.Buffer)
b.ReadFrom(zr)
payload := b.Bytes()
j.Payload = payload
} else {
j.Payload = zpayload
}
b := new(bytes.Buffer)
b.ReadFrom(zr)
payload := b.Bytes()
j.Payload = payload
muxObjJob.Lock()
cacheObjJob[id] = *j

1
sql.go

@ -406,6 +406,7 @@ func initDB() {
,started TIMESTAMP
,ended TIMESTAMP
,timeout TIMESTAMP
,zipped TINYINT NOT NULL
,payload VARBINARY(20000)
,FOREIGN KEY (obj_id) REFERENCES obj(id) ON DELETE CASCADE
,KEY (is_done)
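
Since zipped is NOT NULL, a database created before this commit needs the column added before the new INSERT/SELECT statements run. A hypothetical one-off migration, not part of this commit (DEFAULT 1 because the old createJob always zlib-compressed payloads, so every pre-existing row is compressed):

// Hypothetical migration for pre-existing databases; uses the package-level
// db handle and logOnError the way the rest of sql.go does.
_, err := db.Exec(`ALTER TABLE obj_job ADD COLUMN zipped TINYINT NOT NULL DEFAULT 1 AFTER timeout;`)
logOnError(err, "initDB : add zipped column to obj_job")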