Spoiler: limited hangout
Nice movie bro …
SWI PROLOG: interfacing as a process singleton
Link to SWI page: https://www.swi-prolog.org/contrib/DotNetInterface.md
RUBY SCRIPT:
#!/usr/bin/env ruby
$prologRef = nil
$prologTempDir = "/Users/daniel/Desktop/Temp/"
$prologExe = "/opt/local/bin/swipl"
$isHalted = false
def StartProlog
$prologRef = IO.popen($prologExe,"w")
$isHalted = false
end
def StopProlog
$prologRef.close_write
$isHalted = true
end
def CallProlog(statement)
$prologRef.puts "call((" + statement + "))."
end
def IsTrueProlog(statement)
  # NOTE: reseeding with whole-second time made name collisions likely when two
  # queries landed in the same second; let Ruby seed itself instead.
  randNum = rand(100000)
$prologRef.puts "tell('" + $prologTempDir + "TEST" + randNum.to_s + "')."
$prologRef.puts "(" + statement + ") -> write('true') ; write('false')."
$prologRef.puts "told."
accumulate = ""
isRead = false
while not isRead
if $isHalted == true
return false
end
begin
File.open($prologTempDir + "TEST" + randNum.to_s) do |query_result|
query_result.each do |line|
accumulate += line
end
isRead = true
end
    rescue
      sleep 0.05 # file not written yet; brief pause instead of a hot spin
    end
end
File.delete($prologTempDir + "TEST" + randNum.to_s)
  accumulate == "true"
end
def QueryProlog(variablelist,query)
  # Same fix as above: avoid same-second reseeding so temp names stay unique.
  randNum = rand(100000)
$prologRef.puts "tell('" + $prologTempDir + "QUERY" + randNum.to_s + "')."
$prologRef.puts "findall((" + variablelist + "),(" + query + "),Z),write(Z),fail."
$prologRef.puts "told."
accumulate = ""
isRead = false
while not isRead
if $isHalted == true
return ""
end
begin
File.open($prologTempDir + "QUERY" + randNum.to_s) do |query_result|
query_result.each do |line|
accumulate += line
end
isRead = true
end
    rescue
      sleep 0.05 # file not written yet; brief pause instead of spamming "error"
    end
end
File.delete($prologTempDir + "QUERY" + randNum.to_s)
return accumulate
end
StartProlog()
CallProlog("assert(dog(dan))")
CallProlog("assert(dog(jim))")
CallProlog("assert(dog(pete))")
for i in 0 ... 100
CallProlog("assert(person(fname(dan" + i.to_s + "),lname(sull" + i.to_s + ")))")
end
f = File.open("/Users/daniel/Desktop/Temp/results.md","w")
f.puts QueryProlog("X,Y","person(X,Y)")
f.puts QueryProlog("X","dog(X)")
f.puts IsTrueProlog("dog(ddddddan)")
f.close
StopProlog()
C# wrapper:
using System;
using System.Diagnostics;
using System.Collections;
using System.Collections.Generic;
using System.Text;
using System.Text.RegularExpressions;
using System.IO;
namespace LogicEngineLib
{
public class LogicEXE
{
public Process p = null;
public int PID = -999;
private string _dumpDir;
private string _plcon;
private int _timeOut;
public LogicEXE(string dumpDir, string plCon, int timeoutMilliseconds)
{
this._timeOut = timeoutMilliseconds;
this.p = new Process();
this.p.StartInfo.FileName = plCon;
this.p.StartInfo.RedirectStandardInput = true;
this.p.StartInfo.UseShellExecute = false;
this.p.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;
this.p.StartInfo.CreateNoWindow = true;
this._dumpDir = dumpDir;
this._plcon = plCon;
this.p.Start();
this.PID = this.p.Id;
}
public string ReStart()
{
this.p = new Process();
this.p.StartInfo.FileName = this._plcon;
this.p.StartInfo.RedirectStandardInput = true;
this.p.StartInfo.UseShellExecute = false;
this.p.StartInfo.WindowStyle = ProcessWindowStyle.Hidden;
this.p.StartInfo.CreateNoWindow = true;
this.p.Start();
this.PID = this.p.Id;
return "Restarted service.";
}
public void Write(string statement)
{
this.p.StandardInput.WriteLine(statement);
}
        public string Stop()
        {
            // Ask Prolog to exit cleanly; Close() alone only releases the
            // Process handle and leaves swipl running.
            this.Write("halt.");
            this.p.Close();
            return "Process Stopped.";
        }
public string Assert(string query)
{
this.Write("assert(" + query + ").");
return "Assert performed.";
}
public string Call(string execString)
{
execString = execString.Replace('.',' ');
string sres = Guid.NewGuid().ToString();
Write("tell('" + this._dumpDir.Replace('\\', '/') + sres + "').");
this.Write("call(" + execString + ") -> write('Call Succeeded') ; write('Call Failed').");
Write("told.");
string rstr = string.Empty;
rstr = SpinAndWaitRead(this._dumpDir + sres);
return rstr;
}
public void Consult(string file)
{
this.Write("consult('" + file.Replace('\\', '/') + "').");
}
public string Save(string fileNamePath)
{
Write("tell('" + fileNamePath.Replace("\\","/") + "').");
Write("listing.");
Write("told.");
return "Save Accomplished";
}
public bool IsTrue(string query)
{
query = query.Replace('.', ' ');
StringBuilder sb = new StringBuilder();
Hashtable ht = new Hashtable(20);
bool hasVariables = false;
string[] vars = GetTokens(query);
foreach (string s in vars)
{
if (s.Trim().Length > 0)
{
char[] carr = s.ToCharArray();
if (Char.IsUpper(carr[0]))
{
string tmp = s.Trim();
if (!ht.Contains(tmp))
{
ht[tmp] = tmp;
sb.Append(" " + s);
}
hasVariables = true;
}
}
}
if (hasVariables) return false;
string sres = Guid.NewGuid().ToString();
Write("tell('" + this._dumpDir.Replace('\\', '/') + sres + "').");
Write("(" + query + ") -> write('True') ; write('False').");
Write("told.");
string rstr = string.Empty;
rstr = SpinAndWaitRead(this._dumpDir + sres);
if (rstr.Trim().Length < 4) return false;
bool result = false;
try
{
result = bool.Parse(rstr);
return result;
}
catch
{
return false;
}
}
public string Query(string query, bool distinctResults)
{
query = query.Replace('.',' ');
StringBuilder sb = new StringBuilder();
Hashtable ht = new Hashtable(20);
bool hasVariables = false;
string[] vars = GetTokens(query);
foreach (string s in vars)
{
if (s.Trim().Length > 0)
{
string tmp1 = s.Trim();
char[] carr = tmp1.ToCharArray();
if (Char.IsUpper(carr[0]))
{
string tmp = s.Trim();
if (!ht.Contains(tmp))
{
ht[tmp] = tmp;
sb.Append(" " + s);
}
hasVariables = true;
}
}
}
string sres = Guid.NewGuid().ToString();
Write("tell('" + this._dumpDir.Replace('\\', '/') + sres + "').");
if (hasVariables)
{
if (distinctResults)
{
Write("setof((" + sb.ToString().Trim().Replace(" ", ",") + ") ,(" + query + "),Z),write(Z),fail.");
}
else
{
string tres = "findall((" + sb.ToString().Trim().Replace(" ", ",") + ") ,(" + query + "),Z),write(Z),fail.";
Write(tres);
}
}
else
{
Write(query + ".");
}
Write("told.");
string rstr = string.Empty;
rstr = SpinAndWaitRead(this._dumpDir + sres);
return rstr;
}
public string Listing()
{
string sres = Guid.NewGuid().ToString();
Write("tell('" + this._dumpDir.Replace('\\', '/') + sres + "').");
Write("listing.");
Write("told.");
string rstr = string.Empty;
rstr = SpinAndWaitRead(this._dumpDir + sres);
return rstr;
}
private string SpinAndWaitRead(string fileName)
{
StreamReader sr = null;
string rstr = string.Empty;
int counter = 0;
bool done = false;
while (!done)
{
try
{
if (File.Exists(fileName))
{
sr = new StreamReader(fileName);
rstr = sr.ReadToEnd();
sr.Close();
done = true;
}
}
                catch
                {
                    // Swallow transient read races, but fall through so the
                    // counter and sleep below still run; 'continue' skipped
                    // them and could spin hot forever.
                }
counter++;
System.Threading.Thread.Sleep(1);
if (counter == this._timeOut) break;
}
try
{
File.Delete(fileName);
}
catch { }
return rstr;
}
private string[] GetTokens(string prologStatement)
{
            string[] vars = Regex.Split(prologStatement, @"[,)(=;+:%$@~^/\-><&*!]");
return vars;
}
}
}
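For reference, here is a minimal sketch of the same singleton pattern in Python – it assumes swipl is on the PATH and inherits the original design's spin-wait on the tell/told temp file:
# Sketch: one persistent swipl process; goals go down stdin, answers come back
# through a temp file opened with tell/1 and flushed by told/0.
import os
import subprocess
import tempfile
import time
import uuid

class Prolog:
    def __init__(self, exe="swipl"):
        self.p = subprocess.Popen([exe, "-q"], stdin=subprocess.PIPE, text=True)

    def send(self, line):
        self.p.stdin.write(line + "\n")
        self.p.stdin.flush()

    def is_true(self, goal):
        path = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex).replace("\\", "/")
        self.send("tell('%s')." % path)
        self.send("(%s) -> write(true) ; write(false)." % goal)
        self.send("told.")
        out = ""
        while not out:  # told. closes the file; spin until the answer lands
            try:
                with open(path) as f:
                    out = f.read()
            except FileNotFoundError:
                pass
            time.sleep(0.01)
        os.remove(path)
        return out.strip() == "true"

pl = Prolog()
pl.send("assert(dog(dan)).")
print(pl.is_true("dog(dan)"))  # True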
Still weird …
So …
Up until a few days ago, and for the year or so that I’ve had the T-MOBILE hotspot, my primary IP address pointed to a location in WA State, usually Tukwila or SeaTac.
But for the last couple of days (I surmise) my IP has pointed to Tampa, Florida.
This could be related to the fires in LA; might be. And it is diagnostically interesting from the perspective of collapse and cascading failures in complex systems.
Now imagine a 2X LA FIRE scenario in different parts of the country, or 4X …
Imagine what that looks like.
“DECENTRALIZED” …
OpenAI: losing money …
HEADLINE TRENDING: Part 1 (RSS)
This is a project I am just beginning. The concept is to align and link the headline trend data to other FRED, BLS, or market longitudinal data.
This might be on hold for a few weeks, but it’s a start.
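As a down-payment on the alignment idea, here is a minimal sketch of the join I have in mind, assuming pandas is installed and a FRED series has been exported to CSV (the file name SERIES.csv and its DATE/VALUE columns are hypothetical placeholders):
# Sketch: align daily headline-token counts with a FRED series by date.
import pandas as pd
import mysql.connector

cnx = mysql.connector.connect(user="root", password="password",
                              database="NEWS", host="localhost", port=3306)
cur = cnx.cursor()
cur.execute("select year, month, day, word, sum(freq) "
            "from headline_token group by year, month, day, word")
toks = pd.DataFrame(cur.fetchall(), columns=["year", "month", "day", "word", "frq"])
cnx.close()

# Collapse year/month/day into a real date, then join on the FRED observation date.
toks["date"] = pd.to_datetime(toks[["year", "month", "day"]])
fred = pd.read_csv("SERIES.csv", parse_dates=["DATE"])  # hypothetical FRED export
merged = toks.merge(fred, left_on="date", right_on="DATE", how="inner")
print(merged[["date", "word", "frq", "VALUE"]].head())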
CURRENT MYSQL DB DEFINITION:
-- MySQL dump 10.13 Distrib 8.0.39, for Win64 (x86_64)
--
-- Host: localhost Database: news
-- ------------------------------------------------------
-- Server version 8.0.39
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!50503 SET NAMES utf8mb4 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
--
-- Table structure for table `headline_token`
--
DROP TABLE IF EXISTS `headline_token`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!50503 SET character_set_client = utf8mb4 */;
CREATE TABLE `headline_token` (
`headline_token_id` bigint NOT NULL AUTO_INCREMENT,
`rss_id` bigint NOT NULL,
`year` int NOT NULL,
`month` int NOT NULL,
`day` int NOT NULL,
`word` varchar(200) DEFAULT NULL,
`freq` int DEFAULT NULL,
`created_on` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`headline_token_id`),
KEY `idx_headline_token` (`rss_id`,`year`,`month`,`day`,`word`)
) ENGINE=InnoDB AUTO_INCREMENT=64162 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Table structure for table `log`
--
DROP TABLE IF EXISTS `log`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!50503 SET character_set_client = utf8mb4 */;
CREATE TABLE `log` (
`log_id` bigint NOT NULL AUTO_INCREMENT,
`step` varchar(100) NOT NULL,
`log_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`log_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1248 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Table structure for table `rss`
--
DROP TABLE IF EXISTS `rss`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!50503 SET character_set_client = utf8mb4 */;
CREATE TABLE `rss` (
`ID` bigint NOT NULL AUTO_INCREMENT,
`SOURCE` varchar(100) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_ci NOT NULL,
`LINK` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_ci NOT NULL,
`TITLE` varchar(400) CHARACTER SET utf8mb3 COLLATE utf8mb3_unicode_ci NOT NULL,
`PUBLISHED` datetime NOT NULL,
PRIMARY KEY (`ID`),
UNIQUE KEY `unique_link` (`LINK`)
) ENGINE=InnoDB AUTO_INCREMENT=5491 DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_unicode_ci;
/*!40101 SET character_set_client = @saved_cs_client */;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-12-09 9:49:20
GRABBING HEADLINES FROM RSS FEEDS:
from __future__ import print_function
import os
import feedparser
import os.path, time
import json
import math
import time
import urllib.parse as pr
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup as BS
from requests import get
from os.path import exists
from socket import socket, AF_INET, SOCK_STREAM
from decimal import Decimal
from datetime import datetime, date, timedelta
from anyascii import anyascii
import mysql.connector
from unidecode import unidecode
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
#track number of records added
record_load = 0
def logrun(step):
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO log
(step)
VALUES
('<<ARG>>')
""".replace("<<ARG>>", step)
cur.execute(qry)
cnx.commit()
cnx.close()
def AlreadySaved(link):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ID from RSS where LINK = '" + link + "'"
cur = cnx.cursor(buffered=True)
cur.execute(qry)
cur.fetchall()
rc = cur.rowcount
cnx.close()
if rc > 0:
return True
else:
return False
def SaveRSS(source, title, link, published):
global record_load
record_load += 1
    # the insert below is parameterized, so doubling quotes here only corrupted titles
    tit = title
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO RSS
(SOURCE,
LINK,
TITLE,
PUBLISHED)
VALUES
(%s,%s,%s,%s)
"""
val = (source, link, tit, published)
cur.execute(qry, val)
cnx.commit()
cnx.close()
def GrabRSS(RssURL, SourceName):
    hdrs = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    # pass the UA to feedparser; previously hdrs was built but never used
    NewsFeed = feedparser.parse(RssURL, request_headers=hdrs)
for na in NewsFeed.entries:
try:
print(na.title)
print(na.link)
print(na.published)
print(na.published_parsed)
except:
logrun("ERROR: GrabRSS from " + SourceName)
continue
if AlreadySaved(na.link.strip().upper()):
continue
print("*************************")
tyr = na.published_parsed[0]
tmn = na.published_parsed[1]
tdy = na.published_parsed[2]
thr = na.published_parsed[3]
tmi = na.published_parsed[4]
tsc = na.published_parsed[5]
ptms = "%s-%s-%s %s:%s:%s" % (tyr, tmn, tdy, thr, tmi, tsc)
SaveRSS(SourceName, unidecode(na.title), na.link.strip().upper(), ptms)
def debugHere():
input("Press enter to continue ...")
def clearConsole():
command = 'clear'
if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls
command = 'cls'
os.system(command)
def CycleFeeds():
logrun("start: CycleFeeds")
infowars = "https://www.infowars.com/rss.xml"
zh = "https://feeds.feedburner.com/zerohedge/feed"
yahoo = "https://news.yahoo.com/rss/"
bbcworld = "https://feeds.bbci.co.uk/news/world/rss.xml"
bbc = "http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml"
nyt = "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"
cnbc = "https://www.cnbc.com/id/100727362/device/rss/rss.html"
fox = "https://moxie.foxnews.com/google-publisher/latest.xml"
aljazeera = "https://www.aljazeera.com/xml/rss/all.xml"
cbs = "https://www.cbsnews.com/latest/rss/world"
f24 = "https://www.france24.com/en/rss"
gnews = "https://globalnews.ca/world/feed/"
wtimes = "https://www.washingtontimes.com/rss/headlines/news/world"
sydher = "https://www.smh.com.au/rss/world.xml"
skynews = "https://feeds.skynews.com/feeds/rss/world.xml"
latimes = "https://www.latimes.com/world/rss2.0.xml"
tindia = "https://timesofindia.indiatimes.com/rssfeeds/296589292.cms"
rt = "https://www.rt.com/rss/news/"
sun = "https://www.thesun.co.uk/news/worldnews/feed/"
mirror = "https://www.mirror.co.uk/news/world-news/?service=rss"
vox = "https://www.vox.com/rss/world-politics/index.xml"
wotr = "https://warontherocks.com/feed/"
hot = "https://www.headlinesoftoday.com/feed"
wnera = "https://worldnewsera.com/feed/"
gpress = "https://globalpressjournal.com/feed/"
indep = "https://www.independent.co.uk/news/world/rss"
spiegel = "https://www.spiegel.de/international/index.rss"
guard = "https://www.theguardian.com/world/rss"
GrabRSS(guard, "GUARDIAN")
GrabRSS(spiegel, "DER SPIEGEL")
GrabRSS(indep, "INDEPENDENT")
GrabRSS(gpress, "Global Press Journal")
GrabRSS(wnera, "World News Era")
GrabRSS(hot, "Headlines of Today")
GrabRSS(wotr, "WAR ON THE ROCKS")
GrabRSS(vox, "VOX")
GrabRSS(mirror, "MIRROR")
GrabRSS(sun, "SUN")
GrabRSS(rt, "RT")
GrabRSS(tindia, "Times of India")
GrabRSS(latimes, "LA TIMES")
GrabRSS(skynews, "SKY NEWS")
GrabRSS(sydher, "Sydney Herald")
GrabRSS(wtimes, "WA TIMES")
GrabRSS(gnews, "Global News")
GrabRSS(f24, "FRANCE24")
GrabRSS(cbs, "CBS")
GrabRSS(aljazeera, "ALJAZEERA")
GrabRSS(fox, "FOX")
GrabRSS(cnbc, "CNBC")
GrabRSS(bbcworld, "BBC WORLD")
GrabRSS(infowars, "INFOWARS")
GrabRSS(zh, "ZEROHEDGE")
GrabRSS(yahoo, "YAHOO")
GrabRSS(bbc, "BBC")
GrabRSS(nyt, "NYT")
logrun("stop: CycleFeeds, records: " + str(record_load))
CycleFeeds()
TOKENIZE HEADLINES:
#parse headlines
import re
import mysql.connector
from anyascii import anyascii
#f = open("demofile2.txt", "a")
#f.write("Now the file has more content!")
#f.close()
#open and read the file after the appending:
#f = open("demofile2.txt", "r")
#print(f.read())
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
def GetSql(qry):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
def SaveToks(rss_id, year, month, day, word, freq):
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO headline_token
(rss_id,
year,
month,
day,
word,
freq)
VALUES
(%s,%s,%s,%s,%s,%s)
"""
val = (rss_id, year, month, day, word, freq)
cur.execute(qry, val)
cnx.commit()
cnx.close()
def allHeadlines():
headln = """
select
r.id,
r.title,
r.published
from rss r
left outer join headline_token ht
on r.id = ht.rss_id
where ht.rss_id is null;
"""
rset = GetSql(headln)
f = open("news_token.txt", "w")
f.write("ID")
f.write('\t')
f.write("YEAR")
f.write('\t')
f.write("MONTH")
f.write('\t')
f.write("DAY")
f.write('\t')
f.write("WORD")
f.write('\t')
f.write("FREQUENCY")
f.write('\n')
cntr = 0
for rec in rset:
cntr += 1
print(str(cntr))
id = rec[0]
tit = rec[1]
pub = rec[2]
parts = str(pub).split()
parts2 = parts[0].split('-')
yr = int(parts2[0])
mn = int(parts2[1])
dy = int(parts2[2])
tit = re.sub(r"(?<=\d),(?=\d)", "", tit)
tit = tit.replace("''","'")
tit = tit.replace("'","")
tit = tit.replace(","," ")
tit = tit.replace(":", " ")
#&+
tit = tit.replace("&", " ")
tit = tit.replace("+", " ")
tit = tit.replace("\"", " ")
tit = tit.replace("(", " ")
tit = tit.replace(")", " ")
tit = tit.replace("|", " ")
tit = tit.replace("?", " ")
tit = tit.replace(";", " ")
tit = tit.replace("...", " ")
tit = tit.upper()
tits = tit.split()
dic = {}
for t in tits:
t2 = t.strip()
if t2 in dic:
dic[t2] += 1
else:
dic[t2] = 1
for k in dic.keys():
f.write(str(id))
f.write('\t')
f.write(str(yr))
f.write('\t')
f.write(str(mn))
f.write('\t')
f.write(str(dy))
f.write('\t')
#f.write(str(k.encode("UTF-8")))
f.write(anyascii(k))
f.write('\t')
f.write(str(dic[k]))
f.write('\n')
SaveToks(id, yr, mn, dy, anyascii(k), dic[k])
f.close()
allHeadlines()
Checkout Results:
select * from
(
select
year,
month,
day,
word,
sum(freq) as frq
from headline_token
where not word in
('TO','IN','OF','THE','FOR','AND','ON',
'AFTER','AS','A','IS','WITH','AT','BY',
'HOW','BE','ABOUT','-','HIS','HER','MORE',
'IT','WILL','HE','HAS', 'SEE', 'THEY', 'SHE',
'LAST','GOES','NEXT', 'SAYS', 'FROM', 'WHAT',
'MY', 'THEM', 'I', 'MOST', 'COULD',
'WHERE', 'BUT', 'AN', 'WE', 'HAVE')
group by
year,
month,
day,
word
order by year asc, month asc, day asc, sum(freq) desc
) t
where t.frq > 8;
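To eyeball a single token's trajectory from that table, a minimal matplotlib sketch (assumes matplotlib is installed; the word 'FIRE' is just an example):
# Sketch: plot one token's daily headline frequency.
import matplotlib.pyplot as plt
import mysql.connector
from datetime import date

cnx = mysql.connector.connect(user="root", password="password",
                              database="NEWS", host="localhost", port=3306)
cur = cnx.cursor()
cur.execute("select year, month, day, sum(freq) from headline_token "
            "where word = %s group by year, month, day "
            "order by year, month, day", ("FIRE",))
rows = cur.fetchall()
cnx.close()

dates = [date(y, m, d) for (y, m, d, _) in rows]
freqs = [int(f) for (_, _, _, f) in rows]
plt.plot(dates, freqs, marker="o")
plt.title("Daily headline frequency: FIRE")
plt.xlabel("date")
plt.ylabel("mentions")
plt.show()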
Combined Script:
from __future__ import print_function
import os
import re
import feedparser
import os.path, time
import json
import math
import time
import urllib.parse as pr
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup as BS
from requests import get
from os.path import exists
from socket import socket, AF_INET, SOCK_STREAM
from decimal import Decimal
from datetime import datetime, date, timedelta
from anyascii import anyascii
import mysql.connector
from unidecode import unidecode
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
#track number of records added
record_load = 0
def GetSql(qry):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
def logrun(step):
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO log
(step)
VALUES
('<<ARG>>')
""".replace("<<ARG>>", step)
cur.execute(qry)
cnx.commit()
cnx.close()
def AlreadySaved(link):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ID from RSS where LINK = '" + link + "'"
cur = cnx.cursor(buffered=True)
cur.execute(qry)
cur.fetchall()
rc = cur.rowcount
cnx.close()
if rc > 0:
return True
else:
return False
def SaveRSS(source, title, link, published):
global record_load
record_load += 1
    # the insert below is parameterized, so doubling quotes here only corrupted titles
    tit = title
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO RSS
(SOURCE,
LINK,
TITLE,
PUBLISHED)
VALUES
(%s,%s,%s,%s)
"""
val = (source, link, tit, published)
cur.execute(qry, val)
cnx.commit()
cnx.close()
def GrabRSS(RssURL, SourceName):
    hdrs = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    # pass the UA to feedparser; previously hdrs was built but never used
    NewsFeed = feedparser.parse(RssURL, request_headers=hdrs)
for na in NewsFeed.entries:
try:
print(na.title)
print(na.link)
print(na.published)
print(na.published_parsed)
except:
logrun("ERROR: GrabRSS from " + SourceName)
continue
if AlreadySaved(na.link.strip().upper()):
continue
print("*************************")
tyr = na.published_parsed[0]
tmn = na.published_parsed[1]
tdy = na.published_parsed[2]
thr = na.published_parsed[3]
tmi = na.published_parsed[4]
tsc = na.published_parsed[5]
ptms = "%s-%s-%s %s:%s:%s" % (tyr, tmn, tdy, thr, tmi, tsc)
SaveRSS(SourceName, unidecode(na.title), na.link.strip().upper(), ptms)
def debugHere():
input("Press enter to continue ...")
def clearConsole():
command = 'clear'
if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls
command = 'cls'
os.system(command)
def SaveToks(rss_id, year, month, day, word, freq):
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO headline_token
(rss_id,
year,
month,
day,
word,
freq)
VALUES
(%s,%s,%s,%s,%s,%s)
"""
val = (rss_id, year, month, day, word, freq)
cur.execute(qry, val)
cnx.commit()
cnx.close()
def allHeadlines():
headln = """
select
r.id,
r.title,
r.published
from rss r
left outer join headline_token ht
on r.id = ht.rss_id
where ht.rss_id is null;
"""
rset = GetSql(headln)
f = open("news_token.txt", "w")
f.write("ID")
f.write('\t')
f.write("YEAR")
f.write('\t')
f.write("MONTH")
f.write('\t')
f.write("DAY")
f.write('\t')
f.write("WORD")
f.write('\t')
f.write("FREQUENCY")
f.write('\n')
cntr = 0
for rec in rset:
cntr += 1
print(str(cntr))
id = rec[0]
tit = rec[1]
pub = rec[2]
parts = str(pub).split()
parts2 = parts[0].split('-')
yr = int(parts2[0])
mn = int(parts2[1])
dy = int(parts2[2])
tit = re.sub(r"(?<=\d),(?=\d)", "", tit)
tit = tit.replace("''","'")
tit = tit.replace("'","")
tit = tit.replace(","," ")
tit = tit.replace(":", " ")
#&+
tit = tit.replace("&", " ")
tit = tit.replace("+", " ")
tit = tit.replace("\"", " ")
tit = tit.replace("(", " ")
tit = tit.replace(")", " ")
tit = tit.replace("|", " ")
tit = tit.replace("?", " ")
tit = tit.replace(";", " ")
tit = tit.replace("...", " ")
tit = tit.upper()
tits = tit.split()
dic = {}
for t in tits:
t2 = t.strip()
if t2 in dic:
dic[t2] += 1
else:
dic[t2] = 1
for k in dic.keys():
f.write(str(id))
f.write('\t')
f.write(str(yr))
f.write('\t')
f.write(str(mn))
f.write('\t')
f.write(str(dy))
f.write('\t')
#f.write(str(k.encode("UTF-8")))
f.write(anyascii(k))
f.write('\t')
f.write(str(dic[k]))
f.write('\n')
SaveToks(id, yr, mn, dy, anyascii(k), dic[k])
f.close()
def CycleFeeds():
logrun("start: CycleFeeds")
infowars = "https://www.infowars.com/rss.xml"
zh = "https://feeds.feedburner.com/zerohedge/feed"
yahoo = "https://news.yahoo.com/rss/"
bbcworld = "https://feeds.bbci.co.uk/news/world/rss.xml"
bbc = "http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml"
nyt = "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"
cnbc = "https://www.cnbc.com/id/100727362/device/rss/rss.html"
fox = "https://moxie.foxnews.com/google-publisher/latest.xml"
aljazeera = "https://www.aljazeera.com/xml/rss/all.xml"
cbs = "https://www.cbsnews.com/latest/rss/world"
f24 = "https://www.france24.com/en/rss"
gnews = "https://globalnews.ca/world/feed/"
wtimes = "https://www.washingtontimes.com/rss/headlines/news/world"
sydher = "https://www.smh.com.au/rss/world.xml"
skynews = "https://feeds.skynews.com/feeds/rss/world.xml"
latimes = "https://www.latimes.com/world/rss2.0.xml"
tindia = "https://timesofindia.indiatimes.com/rssfeeds/296589292.cms"
rt = "https://www.rt.com/rss/news/"
sun = "https://www.thesun.co.uk/news/worldnews/feed/"
mirror = "https://www.mirror.co.uk/news/world-news/?service=rss"
vox = "https://www.vox.com/rss/world-politics/index.xml"
wotr = "https://warontherocks.com/feed/"
wnera = "https://worldnewsera.com/feed/"
gpress = "https://globalpressjournal.com/feed/"
indep = "https://www.independent.co.uk/news/world/rss"
spiegel = "https://www.spiegel.de/international/index.rss"
guard = "https://www.theguardian.com/world/rss"
GrabRSS(guard, "GUARDIAN")
GrabRSS(spiegel, "DER SPIEGEL")
GrabRSS(indep, "INDEPENDENT")
GrabRSS(gpress, "Global Press Journal")
GrabRSS(wnera, "World News Era")
GrabRSS(wotr, "WAR ON THE ROCKS")
GrabRSS(vox, "VOX")
GrabRSS(mirror, "MIRROR")
GrabRSS(sun, "SUN")
GrabRSS(rt, "RT")
GrabRSS(tindia, "Times of India")
GrabRSS(latimes, "LA TIMES")
GrabRSS(skynews, "SKY NEWS")
GrabRSS(sydher, "Sydney Herald")
GrabRSS(wtimes, "WA TIMES")
GrabRSS(gnews, "Global News")
GrabRSS(f24, "FRANCE24")
GrabRSS(cbs, "CBS")
GrabRSS(aljazeera, "ALJAZEERA")
GrabRSS(fox, "FOX")
GrabRSS(cnbc, "CNBC")
GrabRSS(bbcworld, "BBC WORLD")
GrabRSS(infowars, "INFOWARS")
GrabRSS(zh, "ZEROHEDGE")
GrabRSS(yahoo, "YAHOO")
GrabRSS(bbc, "BBC")
GrabRSS(nyt, "NYT")
logrun("stop: CycleFeeds, records: " + str(record_load))
def WriteOut(yr, mn, dy):
strSql = """
select
h.word,
sum(h.freq) as frq
from headline_token h
where h.year = <<YEAR>> and h.month = <<MONTH>> and h.day = <<DAY>> and
not h.word in
('TO','IN','OF','THE','FOR','AND','ON',
'AFTER','AS','A','IS','WITH','AT','BY',
'HOW','BE','ABOUT','-','HIS','HER','MORE',
'IT','WILL','HE','HAS', 'SEE', 'THEY', 'SHE',
'LAST','GOES','NEXT', 'SAYS', 'FROM', 'WHAT',
'MY', 'THEM', 'I', 'MOST', 'COULD',
'WHERE', 'BUT', 'AN', 'WE', 'HAVE', 'NEW')
group by
h.word
order by sum(h.freq) desc;
"""
strSql = strSql.replace("<<YEAR>>", str(yr))
strSql = strSql.replace("<<MONTH>>", str(mn))
strSql = strSql.replace("<<DAY>>", str(dy))
rset = GetSql(strSql)
fname = str(yr) + "-" + str(mn) + "-" + str(dy) + ".txt"
f = open(fname, "w")
for rec in rset:
f.write(str(rec[0]))
f.write('\t')
f.write(str(rec[1]))
f.write('\n')
f.close()
CycleFeeds()
allHeadlines()
ssql = """
select distinct year, month, day
from headline_token
order by year asc, month asc, day asc;
"""
rset2 = GetSql(ssql)
count = 0
for r in rset2:
count += 1
print(str(count))
WriteOut(r[0],r[1],r[2])
GRAB IT
import urllib.request, urllib.error, urllib.parse
import requests
import re
from bs4 import BeautifulSoup
url = 'https://www.soccerfuncamp.com/'
response = urllib.request.urlopen(url)
webContent = response.read().decode('UTF-8')
soup = BeautifulSoup(webContent.lower(), 'html.parser')
lst = re.findall('mailto[:]\\S+@\\S+', webContent.lower())
for l in lst:
print(l)
for link in soup.find_all('a'):
    href = link.get('href')
    if href and href[0:4] == "http":  # guard: anchors without an href return None
        print(href)
#print(webContent)
SENDMAIL.py
import sys
from email.mime.text import MIMEText
from subprocess import Popen, PIPE
msg = MIMEText("Here is the body of my message")
msg["From"] = "[email protected]"
msg["To"] = "[email protected]"
msg["Subject"] = "This is the subject."
p = Popen(["/usr/sbin/sendmail", "-t", "-oi"], stdin=PIPE)
# Both Python 2.X and 3.X
p.communicate(msg.as_bytes() if sys.version_info >= (3,0) else msg.as_string())
# Python 2.X
p.communicate(msg.as_string())
# Python 3.X
p.communicate(msg.as_bytes())
Weather and Climate Modification
Calling all Sovietologists …
- AIs implement synthetic scripts
- AIs, as of this moment, show ZERO evidence of “awareness”
- It is UNLIKELY that an AI could drive someone to suicide UNLESS that person is already close to doing it AND feeding the AI (GIGO) with their own projected depressive sadness
An AI might be used as a trolling process, but it’s entirely likely that ONCE the kids are engaged, the chat sessions are handed off to EVIL pieces of shit that pay for the privilege to do THIS and worse to kids.
AIs can’t do this, and there is more to this story … I think …
Process: FT8 file from WSJT-X
import re
import maidenhead as mh
fl = "C:\\Users\\danielsullivan\\AppData\\Local\\WSJT-X\\ALL.TXT"
fw = "callsign_locations.txt"
lines = None
with open(fl, "r") as rf:
lines = rf.readlines()
with open(fw, "w") as wf:
wf.write("CALLSIGN")
wf.write('\t')
wf.write("DATE")
wf.write('\t')
wf.write("FREQ")
wf.write('\t')
wf.write("SNR")
wf.write('\t')
wf.write("DRIFT_SECONDS")
wf.write('\t')
wf.write("GRID")
wf.write('\t')
wf.write("LAT")
wf.write('\t')
wf.write("LNG")
wf.write('\n')
    for ln in lines:
        # fixed-width slices matching this log's ALL.TXT column layout
        utcd = ln[0:17].strip()        # yymmdd_hhmmss timestamp
        pts = utcd.split('_')
        yr = pts[0][0:2]
        mn = pts[0][2:4]
        dy = pts[0][4:6]
        dts = mn + "/" + dy + "/" + yr
        frq = ln[17:24].strip()        # dial frequency
        snr = ln[30:38].strip()        # signal-to-noise report
        timedrift = ln[37:43].strip()  # time drift (seconds)
        msg = ln[47:].strip()          # decoded FT8 message text
grd = ""
lat = None
lng = None
        mparts = msg.split(' ')
        if len(mparts) < 2:
            continue  # skip decodes without a callsign field
        if len(mparts) == 3:
chk = mparts[2].strip()
if len(chk) == 4:
if chk[0].isalpha() and \
chk[1].isalpha() and \
chk[2].isdigit() and \
chk[3].isdigit():
print(chk)
ll = mh.to_location(chk)
grd = chk
lat = ll[0]
lng = ll[1]
wf.write(mparts[1])
wf.write('\t')
wf.write(dts)
wf.write('\t')
wf.write(frq)
wf.write('\t')
wf.write(snr)
wf.write('\t')
wf.write(timedrift)
wf.write('\t')
wf.write(grd)
wf.write('\t')
wf.write(str(lat))
wf.write('\t')
wf.write(str(lng))
wf.write('\n')
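For reference, the grid-to-coordinates call used above: the maidenhead package's to_location takes a grid square and returns a (lat, lng) tuple (FN31 is just an example grid):
import maidenhead as mh

lat, lng = mh.to_location("FN31")  # corner of grid square FN31
print(lat, lng)                    # approximately 41.0, -74.0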
Your own daily dose … (of the news)
- Below is a general recipe for experimenting with RSS feeds AND speech synthesizers.
- For the speech synthesis there are two scripts, very similar: one works with ESPEAK (free, open source), the other with Microsoft SAPI.
- In order to run these scripts you will need MYSQL installed. You will need a minimum level of understanding of how MYSQL works. You can easily translate the database piece to ODBC, and the rest to PowerShell or whatever. That’s your business, not mine.
- Once you’ve installed MYSQL and the server is running, create a database called “NEWS”: create database NEWS;
- After you’ve created the NEWS database, using the CLI (command line interface) as above, type the command: use NEWS;
- Once you are in the NEWS database, copy and paste the entire script below into the CLI, or save it as a text file and run it from the CLI using the command: source rss.sql (assuming you stored the create table text below in that file)
- In the example I’m using the ROOT user, why? – because IDGAF. But best practice is to create special database users with limited permissions. If you’ve installed your MYSQL database without granting permission to external (port) connections? – then it’s not a concern.
- Running the aggregator might get you blocked by a site, or even get your own network flagged. This behavior, which was innocuous 20 years ago, is now attacked and classified as aggressive network behavior. Just be careful.
- After you’ve run the aggregation script (it can be run by CRON or Task Scheduler, daily or hourly if you like – see the scheduling sketch after this list), you can run one of the speech synthesis apps to read the headlines.
- If you have a compatible shortwave radio, with upper and lower side band, and a LINUX computer running JS8 Call with appropriate libraries for CAT control? – then look into this and you can set up a headline service over shortwave: https://planetarystatusreport.com/?p=7432
Have fun getting your daily dose of the fucking news.
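If you’d rather not wire up cron or Task Scheduler, a minimal in-process hourly loop works too (the module name rss_grab is a hypothetical stand-in for wherever you saved the aggregation script):
# Sketch: run the aggregator once an hour without an external scheduler.
# Assumes CycleFeeds lives in a module named rss_grab (hypothetical), with its
# top-level CycleFeeds() call removed or guarded by if __name__ == "__main__".
import time
from rss_grab import CycleFeeds

while True:
    CycleFeeds()
    time.sleep(60 * 60)  # one pass per hour
The cron equivalent is a single line, e.g.: 0 * * * * /usr/bin/python3 /path/to/rss_grab.py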
Create Table Script for RSS Database
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET AUTOCOMMIT = 0;
START TRANSACTION;
SET time_zone = "+00:00";
CREATE TABLE `RSS` (
`ID` bigint(20) NOT NULL,
`SOURCE` varchar(100) COLLATE utf8_unicode_ci NOT NULL,
`LINK` varchar(255) COLLATE utf8_unicode_ci NOT NULL,
`TITLE` varchar(400) COLLATE utf8_unicode_ci NOT NULL,
`PUBLISHED` datetime NOT NULL,
`ARTICLE` text COLLATE utf8_unicode_ci NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
ALTER TABLE `RSS`
ADD PRIMARY KEY (`ID`),
ADD UNIQUE KEY `unique_link` (`LINK`);
ALTER TABLE `RSS`
MODIFY `ID` bigint(20) NOT NULL AUTO_INCREMENT;
COMMIT;
Python Script for Aggregating RSS Feeds and storing stories locally
from __future__ import print_function
import os
import feedparser
import os.path, time
import json
import math
import time
import urllib.parse as pr
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup as BS
from requests import get
from os.path import exists
from socket import socket, AF_INET, SOCK_STREAM
from decimal import Decimal
from datetime import datetime, date, timedelta
from anyascii import anyascii
import mysql.connector
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
newsServiceM3 = "ZEROHEDGE"
retHeadlines = 4
newsMode = 3
bigSleep = 90
def GetArt(number):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ARTICLE, SOURCE, LINK from RSS where ID = %s" % (number)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes[0]
def GetTopHourly(source):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ID, TITLE, PUBLISHED, SOURCE, length(ARTICLE) as LOF from RSS where SOURCE = '%s' order by PUBLISHED desc limit 1" % source
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
def GetTop(source, number):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ID, TITLE, PUBLISHED, SOURCE, length(ARTICLE) as LOF from RSS where SOURCE = '%s' order by PUBLISHED desc limit %s" % (source, number)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
def AlreadySaved(link):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
qry = "select ID from RSS where LINK = '" + link + "'"
cur = cnx.cursor(buffered=True)
cur.execute(qry)
cur.fetchall()
rc = cur.rowcount
cnx.close()
if rc > 0:
return True
else:
return False
def SaveRSS(source, title, link, published, article):
    # the insert below is parameterized; manual quote-doubling just corrupted the stored text
    tit = title
    clean_text = anyascii(article)
    art = str(clean_text)
if len(art) > 5000:
art = art[0:5000]
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor()
qry = """
INSERT INTO RSS
(SOURCE,
LINK,
TITLE,
PUBLISHED,
ARTICLE)
VALUES
(%s,%s,%s,%s,%s)
"""
val = (source, link, tit, published, art)
cur.execute(qry, val)
cnx.commit()
cnx.close()
def GrabRSS(RssURL, SourceName):
hdrs = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    NewsFeed = feedparser.parse(RssURL, request_headers=hdrs)  # send the UA on the feed fetch too
for na in NewsFeed.entries:
try:
print(na.title)
print(na.link)
print(na.published)
print(na.published_parsed)
except:
continue
if AlreadySaved(na.link):
continue
print("*************************")
response = get(na.link, None, headers=hdrs)
print(na.keys())
soup = BS(response.content, 'html.parser')
txtChunk = ""
for data in soup.find_all("p"):
txtval = data.get_text()
txtval = txtval.strip()
txtarr = txtval.split()
if len(txtarr) == 1:
continue
if "posted" in txtval and ("hours" in txtval or "days" in txtval) and len(txtarr) == 4:
continue
if txtval == "No Search Results Found":
continue
if txtval == "Terms of Service":
continue
if txtval == "Advertise with us":
continue
if txtval == "Media Inquiries":
continue
txtChunk += " " + txtval + "\n"
tyr = na.published_parsed[0]
tmn = na.published_parsed[1]
tdy = na.published_parsed[2]
thr = na.published_parsed[3]
tmi = na.published_parsed[4]
tsc = na.published_parsed[5]
ptms = "%s-%s-%s %s:%s:%s" % (tyr, tmn, tdy, thr, tmi, tsc)
SaveRSS(SourceName, na.title, na.link, ptms, txtChunk.strip())
print(txtChunk.strip())
def debugHere():
input("Press enter to continue ...")
def clearConsole():
command = 'clear'
if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls
command = 'cls'
os.system(command)
def CycleFeeds():
infowars = "https://www.infowars.com/rss.xml"
zh = "https://feeds.feedburner.com/zerohedge/feed"
yahoo = "https://news.yahoo.com/rss/"
cnn = "http://rss.cnn.com/rss/cnn_topstories.rss"
bbc = "http://feeds.bbci.co.uk/news/world/us_and_canada/rss.xml"
nyt = "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"
onion = "https://www.theonion.com/rss"
bb = "https://babylonbee.com/feed"
print("Grabbing Babylon Bee ...")
GrabRSS(bb, "BB")
print("Grabbing ONION ...")
GrabRSS(onion, "ONION")
print("Grabbing INFOWARS ...")
GrabRSS(infowars, "INFOWARS")
print("Grabbing ZEROHEDGE ...")
GrabRSS(zh, "ZEROHEDGE")
#print("Grabbing YAHOO ...")
#GrabRSS(yahoo, "YAHOO")
print("Grabbing CNN ...")
GrabRSS(cnn, "CNN")
print("Grabbing BBC ...")
GrabRSS(bbc, "BBC")
print("Grabbing NYT ...")
GrabRSS(nyt, "NYT")
# FEEDS:
# 1. INFOWARS: https://www.infowars.com/rss.xml
# 2. ZEROHEDGE: https://feeds.feedburner.com/zerohedge/feed
# 3. YAHOO: https://news.yahoo.com/rss/
# 4. CNN: http://rss.cnn.com/rss/cnn_topstories.rss
time.sleep(1)
CycleFeeds()
Python Speech Synthesis Scripts
A: Windows – SAPI
#this script reads headlines from the RSS news feed
#database.
import win32com.client
speaker = win32com.client.Dispatch("SAPI.SpVoice")
import os
import time
import mysql.connector
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
def TOS(text):
    # unused in this Windows/SAPI variant; retained from the Linux espeak script
    os.system(f"espeak -s 130 -v en+m1 '{text}'")
def GetSql(qry):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
#+-----------+--------------+------+-----+---------+----------------+
#| Field | Type | Null | Key | Default | Extra |
#+-----------+--------------+------+-----+---------+----------------+
#| ID | bigint(20) | NO | PRI | NULL | auto_increment |
#| SOURCE | varchar(100) | NO | | NULL | |
#| LINK | varchar(255) | NO | UNI | NULL | |
#| TITLE | varchar(400) | NO | | NULL | |
#| PUBLISHED | datetime | NO | | NULL | |
#| ARTICLE | text | NO | | NULL | |
#+-----------+--------------+------+-----+---------+----------------+
qry1 = "select SOURCE, TITLE from RSS where PUBLISHED > curdate()-1 order by PUBLISHED desc;"
res = GetSql(qry1)
for rec in res:
src = rec[0]
tit = rec[1].replace("''", "")
print(src + ": " + tit)
phrase = "From " + src + ", HEAD LINE, " + tit
speaker.Speak(phrase)
time.sleep(2)
B: Linux – ESPEAK
import os
import shlex
import time
import mysql.connector
usern = "root"
passw = "password"
dbn = "NEWS"
servern = "localhost"
portn = 3306
def TOS(text):
    # shlex.quote keeps apostrophes in headlines from breaking the shell command
    os.system("espeak -s 130 -v en+m1 " + shlex.quote(text))
def GetSql(qry):
# Connect with the MySQL Server
cnx = mysql.connector.connect(user=usern, database=dbn, password=passw, host=servern, port=portn)
cur = cnx.cursor(buffered=True)
cur.execute(qry)
retRes = cur.fetchall()
cnx.close()
return retRes
#+-----------+--------------+------+-----+---------+----------------+
#| Field | Type | Null | Key | Default | Extra |
#+-----------+--------------+------+-----+---------+----------------+
#| ID | bigint(20) | NO | PRI | NULL | auto_increment |
#| SOURCE | varchar(100) | NO | | NULL | |
#| LINK | varchar(255) | NO | UNI | NULL | |
#| TITLE | varchar(400) | NO | | NULL | |
#| PUBLISHED | datetime | NO | | NULL | |
#| ARTICLE | text | NO | | NULL | |
#+-----------+--------------+------+-----+---------+----------------+
qry1 = "select SOURCE, TITLE from RSS where PUBLISHED > curdate()-1 order by PUBLISHED desc;"
res = GetSql(qry1)
for rec in res:
src = rec[0]
tit = rec[1].replace("''", "")
print(src + ": " + tit)
phrase = "From " + src + ", HEAD LINE, " + tit
TOS(phrase)
time.sleep(0.5)
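The GetArt helper in the aggregator above goes unused by these two readers; here is a sketch of feeding one stored article to the espeak helper (assumes GetArt is in scope and that article ID 1 exists):
# Sketch: read one stored article aloud instead of just the headline.
art, src, link = GetArt(1)                # (ARTICLE, SOURCE, LINK) tuple
TOS("From " + src + ". " + art[:500])     # cap the length so espeak doesn't read for ten minutes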