restructure to improve multi-user performance

git-svn-id: svn+ssh://svn.gnucash.org/repo/gnucash/trunk@5037 57a11ea4-9604-0410-9ed3-97b8803252fd
Linas Vepstas 2001-07-27 06:22:02 +00:00
parent 27fa0eac94
commit fc06934106
5 changed files with 265 additions and 241 deletions

View File

@@ -291,77 +291,125 @@ static const char *table_drop_str =
static gpointer
query_cb (PGBackend *be, PGresult *result, int j, gpointer data)
{
GHashTable *xaction_hash = (GHashTable *) data;
GUID *trans_guid;
GList *xaction_list = (GList *) data;
GUID trans_guid;
Transaction *trans;
gnc_commodity *currency;
Timespec ts;
/* find the transaction this goes into */
trans_guid = xaccGUIDMalloc();
*trans_guid = nullguid; /* just in case the read fails ... */
string_to_guid (DB_GET_VAL("transGUID",j), trans_guid);
trans_guid = nullguid; /* just in case the read fails ... */
string_to_guid (DB_GET_VAL("transGUID",j), &trans_guid);
/* use markers to avoid redundant traversals of transactions we've
* already checked recently. */
trans = xaccTransLookup (trans_guid);
if (NULL != trans && 0 != trans->marker)
trans = xaccTransLookup (&trans_guid);
if (NULL != trans)
{
xaccGUIDFree (trans_guid);
return xaction_hash;
if (0 != trans->marker)
{
return xaction_list;
}
else
{
gint32 db_version, cache_version;
db_version = atoi (DB_GET_VAL("version",j));
cache_version = xaccTransGetVersion (trans);
if (db_version <= cache_version) {
return xaction_list;
}
xaccTransBeginEdit (trans);
}
}
else
{
trans = xaccMallocTransaction();
xaccTransBeginEdit (trans);
xaccTransSetGUID (trans, &trans_guid);
}
/* don't put transaction into the list more than once ... */
if (g_hash_table_lookup (xaction_hash, trans_guid))
{
xaccGUIDFree (trans_guid);
return xaction_hash;
}
g_hash_table_insert (xaction_hash, trans_guid, 0);
xaccTransSetNum (trans, DB_GET_VAL("num",j));
xaccTransSetDescription (trans, DB_GET_VAL("description",j));
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_posted",j));
xaccTransSetDatePostedTS (trans, &ts);
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_entered",j));
xaccTransSetDateEnteredTS (trans, &ts);
xaccTransSetVersion (trans, atoi(DB_GET_VAL("version",j)));
return xaction_hash;
currency = gnc_string_to_commodity (DB_GET_VAL("currency",j));
xaccTransSetCurrency (trans, currency);
trans->marker = 1;
xaction_list = g_list_prepend (xaction_list, trans);
return xaction_list;
}
typedef struct _ctxt {
PGBackend *be;
GList *acct_list;
} ctxt;
typedef struct acct_earliest {
Account *acct;
Timespec ts;
} AcctEarliest;
static int ncalls = 0;
static void
for_each_txn (gpointer key, gpointer value, gpointer user_data)
pgendFillOutToCheckpoint (PGBackend *be, const char *query_string)
{
GUID *trans_guid = (GUID *)key;
ctxt *ct = (ctxt *) user_data;
PGBackend *be = ct->be;
GList *anode, *acct_list = ct->acct_list;
int call_count = ncalls;
int nact=0;
GList *xaction_list = NULL;
GList *node, *anode, *acct_list = NULL;
Transaction *trans;
int engine_data_is_newer;
ENTER (" ");
if (!be) return;
/* use markers to avoid redundant traversals of transactions we've
* already checked recently. */
trans = xaccTransLookup (trans_guid);
if (NULL == trans || 0 == trans->marker)
if (0 == ncalls) {
START_CLOCK (9, "starting at level 0");
}
else
{
engine_data_is_newer = pgendCopyTransactionToEngine (be, trans_guid);
trans = xaccTransLookup (trans_guid);
trans->marker = 1;
PINFO ("copy result=%d", engine_data_is_newer);
REPORT_CLOCK (9, "call count %d", call_count);
}
else
ncalls ++;
SEND_QUERY (be, query_string, );
xaction_list = pgendGetResults (be, query_cb, NULL);
REPORT_CLOCK (9, "fetched results at call %d", call_count);
/* restore the splits for these transactions */
for (node=xaction_list; node; node=node->next)
{
PINFO ("avoided scan");
engine_data_is_newer = 1;
Transaction *trans = (Transaction *) node->data;
GList *engine_splits, *snode;
pgendCopySplitsToEngine (be, trans);
xaccTransCommitEdit (trans);
}
/* if we restored this transaction from the db, scan over the accounts
* it affects and see how far back the data goes.
*/
if (0 > engine_data_is_newer)
#if 0
/* hack alert !! deal with kvp later -- huge sucking sound ! */
/* restore any kvp data associated with the transaction and splits */
for (node=xaction_list; node; node=node->next)
{
Transaction *trans = (Transaction *) node->data;
GList *engine_splits, *snode;
trans->kvp_data = pgendKVPFetch (be, &(trans->guid), trans->kvp_data);
engine_splits = xaccTransGetSplitList(trans);
for (snode = engine_splits; snode; snode=snode->next)
{
Split *s = snode->data;
s->kvp_data = pgendKVPFetch (be, &(s->guid), s->kvp_data);
}
xaccTransCommitEdit (trans);
}
#endif
/* run the fill-out algorithm */
for (node=xaction_list; node; node=node->next)
{
Transaction *trans = (Transaction *) node->data;
GList *split_list, *snode;
Timespec ts;
@@ -379,7 +427,7 @@ for_each_txn (gpointer key, gpointer value, gpointer user_data)
Account *acc = xaccSplitGetAccount (s);
/* lets see if we have a record of this account already */
for (anode = ct->acct_list; anode; anode = anode->next)
for (anode = acct_list; anode; anode = anode->next)
{
AcctEarliest * ae = (AcctEarliest *) anode->data;
if (ae->acct == acc)
@@ -399,55 +447,12 @@ for_each_txn (gpointer key, gpointer value, gpointer user_data)
AcctEarliest * ae = g_new (AcctEarliest, 1);
ae->acct = acc;
ae->ts = ts;
ct->acct_list = g_list_prepend (ct->acct_list, ae);
acct_list = g_list_prepend (acct_list, ae);
}
}
}
}
g_list_free (xaction_list);
static gboolean
for_each_remove (gpointer key, gpointer value, gpointer user_data)
{
GUID *trans_guid = (GUID *)key;
xaccGUIDFree (trans_guid);
return TRUE;
}
static int ncalls = 0;
static void
pgendFillOutToCheckpoint (PGBackend *be, const char *query_string)
{
int call_count = ncalls;
int nact=0;
GHashTable *xaction_hash = NULL;
GList *node, *anode, *acct_list = NULL;
ctxt ct;
ENTER (" ");
if (!be) return;
if (0 == ncalls) {
START_CLOCK (9, "starting at level 0");
}
else
{
REPORT_CLOCK (9, "call count %d", call_count);
}
ncalls ++;
SEND_QUERY (be, query_string, );
xaction_hash = g_hash_table_new (g_direct_hash, (GCompareFunc) guid_equal);
pgendGetResults (be, query_cb, xaction_hash);
REPORT_CLOCK (9, "fetched results at call %d", call_count);
/* restore the transactions */
ct.be = be;
ct.acct_list = NULL;
g_hash_table_foreach (xaction_hash, for_each_txn, &ct);
g_hash_table_foreach_remove (xaction_hash, for_each_remove, NULL);
g_hash_table_destroy (xaction_hash);
acct_list = ct.acct_list;
REPORT_CLOCK (9, "done gathering at call %d", call_count);
if (NULL == acct_list) return;
@@ -468,8 +473,8 @@ pgendFillOutToCheckpoint (PGBackend *be, const char *query_string)
* GetBalance goes to less-than-or-equal-to because of the BETWEEN
* that appears in the gncSubTotalBalance sql function. */
p = be->buff; *p = 0;
p = stpcpy (p, "SELECT DISTINCT gncEntry.transGuid from gncEntry, gncTransaction WHERE "
" gncEntry.transGuid = gncTransaction.transGuid AND accountGuid='");
p = stpcpy (p, "SELECT DISTINCT gncTransaction.* from gncEntry, gncTransaction WHERE "
" gncEntry.transGuid = gncTransaction.transGuid AND gncEntry.accountGuid='");
p = guid_to_string_buff(xaccAccountGetGUID(ae->acct), p);
p = stpcpy (p, "' AND gncTransaction.date_posted > '");
p = gnc_timespec_to_iso8601_buff (ae->ts, p);
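
For reference, the query being assembled above ends up with roughly this shape once the account GUID and cutoff timestamp have been spliced in. This is only a sketch: the GUID and date literals below are made-up illustrations, and the real statement may continue beyond what this hunk shows.

    SELECT DISTINCT gncTransaction.*
      FROM gncEntry, gncTransaction
     WHERE gncEntry.transGuid = gncTransaction.transGuid
       AND gncEntry.accountGuid = '6c1af5b6...'                 -- hypothetical account GUID
       AND gncTransaction.date_posted > '2001-07-01 00:00:00';  -- hypothetical cutoff timestamp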

View File

@@ -218,7 +218,7 @@ In 'single-user-update' mode, data loads from the sql database
should be 1.5x faster than comparable loads from the XML flat file,
at least for medium datasets (measured at 3.5 seconds on a 700MHz
Athlon for a dataset with 3K transactions and 150 accounts, vs. 4.8
seconds loading from file).
seconds loading from file; postgres version 7.1.2).
Hitting the 'save' button is a no-op and takes no cpu cycles.
(data is saved as it's modified, so a global save is not needed).
@@ -228,6 +228,10 @@ issued. The 'vacuum' reclaims storage, and 'analyze' does some
performance tuning. Doing this regularly improves performance
about 20% in the cases I looked at.
---------
Accounts are restored roughly at the rate of 50-75 per second
(for above hardware/software config).
---------
Performance in multi-user mode is still a can of worms, and will
be a good bit slower for now. The working assumptions are that
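
Regarding the 'vacuum'/'analyze' maintenance note above, a minimal sketch of the periodic upkeep it describes, run against the GnuCash database from psql or any SQL client (VACUUM and ANALYZE are standard PostgreSQL commands; scheduling via cron or similar is left to the site):

    -- Reclaim storage from dead rows and refresh the planner's statistics.
    VACUUM ANALYZE;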

View File

@@ -560,7 +560,7 @@ sqlQuery_build (sqlQuery *sq, Query *q)
/* reset the buffer pointers */
sq->pq = sq->q_base;
sq->pq = stpcpy(sq->pq,
"SELECT DISTINCT gncTransaction.transGuid ");
"SELECT DISTINCT gncTransaction.* ");
/* For SELECT DISTINCT, ORDER BY expressions must appear in target list */
sq->pq = sql_sort_distinct (sq->pq, xaccQueryGetPrimarySortOrder(q));
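
The comment above refers to a real PostgreSQL restriction on SELECT DISTINCT, which is why the target list is widened to the whole row; a minimal illustration using columns from the table involved (the ordering column here is only an example of what sql_sort_distinct might append):

    -- Rejected by PostgreSQL: the ORDER BY column is not in the DISTINCT target list.
    SELECT DISTINCT transGuid FROM gncTransaction ORDER BY date_posted;

    -- Accepted: selecting the whole row keeps every sort column in the target list.
    SELECT DISTINCT gncTransaction.* FROM gncTransaction ORDER BY date_posted;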

View File

@@ -199,7 +199,7 @@ pgendStoreTransactionNoLock (PGBackend *be, Transaction *trans,
/* If this trans is marked for deletion, use the 'orig' values
* as the base for recording the audit. This wouldn't be normally
* reqquired, except that otherwise one gets a trashed currency
* required, except that otherwise one gets a trashed currency
* value.
*/
pgendStoreAuditTransaction (be, trans->orig, SQL_DELETE);
@@ -340,159 +340,22 @@ pgendStoreAllTransactions (PGBackend *be, AccountGroup *grp)
* probably be fixed.
*/
int
pgendCopyTransactionToEngine (PGBackend *be, const GUID *trans_guid)
void
pgendCopySplitsToEngine (PGBackend *be, Transaction *trans)
{
char *pbuff;
Transaction *trans;
PGresult *result;
Account *acc, *previous_acc=NULL;
gboolean do_set_guid=FALSE;
int engine_data_is_newer = 0;
int i, j, nrows;
PGresult *result;
int save_state = 1;
const GUID *trans_guid;
Account *acc, *previous_acc=NULL;
GList *node, *db_splits=NULL, *engine_splits, *delete_splits=NULL;
gnc_commodity *currency = NULL;
gint64 trans_frac = 0;
ENTER ("be=%p", be);
if (!be || !trans_guid) return 0;
/* disable callbacks into the backend, and events to GUI */
gnc_engine_suspend_events();
pgendDisable(be);
/* first, see if we already have such a transaction */
trans = xaccTransLookup (trans_guid);
if (!trans)
{
trans = xaccMallocTransaction();
do_set_guid=TRUE;
engine_data_is_newer = -1;
}
else
{
/* save some performance, don't go to the backend if the data is recent. */
if (MAX_VERSION_AGE >= be->version_check - trans->version_check)
{
PINFO ("fresh data, skip check");
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
}
/* build the sql query to get the transaction */
pbuff = be->buff;
pbuff[0] = 0;
pbuff = stpcpy (pbuff,
"SELECT * FROM gncTransaction WHERE transGuid='");
pbuff = guid_to_string_buff(trans_guid, pbuff);
pbuff = stpcpy (pbuff, "';");
SEND_QUERY (be,be->buff, 0);
i=0; nrows=0;
do {
GET_RESULTS (be->connection, result);
{
int jrows;
int ncols = PQnfields (result);
jrows = PQntuples (result);
nrows += jrows;
PINFO ("query result %d has %d rows and %d cols",
i, nrows, ncols);
j = 0;
if (0 == nrows)
{
PQclear (result);
/* I believe it's a programming error to get this case.
* Print a warning for now... */
PERR ("no such transaction in the database. This is unexpected ...\n");
xaccBackendSetError (&be->be, ERR_SQL_MISSING_DATA);
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
if (1 < nrows)
{
/* since the guid is primary key, this error is totally
* and completely impossible, theoretically ... */
PERR ("!!!!!!!!!!!SQL database is corrupt!!!!!!!\n"
"too many transactions with GUID=%s\n",
guid_to_string (trans_guid));
if (jrows != nrows) xaccTransCommitEdit (trans);
xaccBackendSetError (&be->be, ERR_BACKEND_DATA_CORRUPT);
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
/* First order of business is to determine whose data is
* newer: the engine cache, or the database. If the
* database has newer stuff, we update the engine. If the
* engine is equal or newer, we do nothing in this routine.
* Of course, we know the database has newer data if this
* transaction doesn't exist in the engine yet.
*/
if (!do_set_guid)
{
gint32 db_version, cache_version;
db_version = atoi (DB_GET_VAL("version",j));
cache_version = xaccTransGetVersion (trans);
if (db_version == cache_version) {
engine_data_is_newer = 0;
} else
if (db_version < cache_version) {
engine_data_is_newer = +1;
} else {
engine_data_is_newer = -1;
}
}
/* if the DB data is newer, copy it to engine */
if (0 > engine_data_is_newer)
{
Timespec ts;
xaccTransBeginEdit (trans);
if (do_set_guid) xaccTransSetGUID (trans, trans_guid);
xaccTransSetNum (trans, DB_GET_VAL("num",j));
xaccTransSetDescription (trans, DB_GET_VAL("description",j));
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_posted",j));
xaccTransSetDatePostedTS (trans, &ts);
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_entered",j));
xaccTransSetDateEnteredTS (trans, &ts);
xaccTransSetVersion (trans, atoi(DB_GET_VAL("version",j)));
currency = gnc_string_to_commodity (DB_GET_VAL("currency",j));
trans_frac = gnc_commodity_get_fraction (currency);
xaccTransSetCurrency
(trans, gnc_string_to_commodity (DB_GET_VAL("currency",j)));
}
}
PQclear (result);
i++;
} while (result);
/* set timestamp as 'recent' for this data */
trans->version_check = be->version_check;
/* if engine data was newer, we are done */
if (0 <= engine_data_is_newer)
{
pgendEnable(be);
gnc_engine_resume_events();
return engine_data_is_newer;
}
/* ------------------------------------------------- */
/* If we are here, then the sql database contains data that is
* newer than what we have in the engine. And so, below,
* we finish the job of yanking data out of the db.
*/
trans_guid = xaccTransGetGUID (trans);
currency = xaccTransGetCurrency (trans);
trans_frac = gnc_commodity_get_fraction (currency);
/* build the sql query the splits */
pbuff = be->buff;
@@ -502,7 +365,7 @@ pgendCopyTransactionToEngine (PGBackend *be, const GUID *trans_guid)
pbuff = guid_to_string_buff(trans_guid, pbuff);
pbuff = stpcpy (pbuff, "';");
SEND_QUERY (be,be->buff, 0);
SEND_QUERY (be,be->buff, );
i=0; nrows=0;
do {
GET_RESULTS (be->connection, result);
@@ -628,6 +491,156 @@ pgendCopyTransactionToEngine (PGBackend *be, const GUID *trans_guid)
g_list_free (delete_splits);
g_list_free (db_splits);
}
int
pgendCopyTransactionToEngine (PGBackend *be, const GUID *trans_guid)
{
char *pbuff;
Transaction *trans;
PGresult *result;
gboolean do_set_guid=FALSE;
int engine_data_is_newer = 0;
int i, j, nrows;
GList *node, *engine_splits;
ENTER ("be=%p", be);
if (!be || !trans_guid) return 0;
/* disable callbacks into the backend, and events to GUI */
gnc_engine_suspend_events();
pgendDisable(be);
/* first, see if we already have such a transaction */
trans = xaccTransLookup (trans_guid);
if (!trans)
{
trans = xaccMallocTransaction();
do_set_guid=TRUE;
engine_data_is_newer = -1;
}
else
{
/* save some performance, don't go to the backend if the data is recent. */
if (MAX_VERSION_AGE >= be->version_check - trans->version_check)
{
PINFO ("fresh data, skip check");
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
}
/* build the sql query to get the transaction */
pbuff = be->buff;
pbuff[0] = 0;
pbuff = stpcpy (pbuff,
"SELECT * FROM gncTransaction WHERE transGuid='");
pbuff = guid_to_string_buff(trans_guid, pbuff);
pbuff = stpcpy (pbuff, "';");
SEND_QUERY (be,be->buff, 0);
i=0; nrows=0;
do {
GET_RESULTS (be->connection, result);
{
int jrows;
int ncols = PQnfields (result);
jrows = PQntuples (result);
nrows += jrows;
PINFO ("query result %d has %d rows and %d cols",
i, nrows, ncols);
j = 0;
if (0 == nrows)
{
PQclear (result);
/* I believe it's a programming error to get this case.
* Print a warning for now... */
PERR ("no such transaction in the database. This is unexpected ...\n");
xaccBackendSetError (&be->be, ERR_SQL_MISSING_DATA);
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
if (1 < nrows)
{
/* since the guid is primary key, this error is totally
* and completely impossible, theoretically ... */
PERR ("!!!!!!!!!!!SQL database is corrupt!!!!!!!\n"
"too many transactions with GUID=%s\n",
guid_to_string (trans_guid));
if (jrows != nrows) xaccTransCommitEdit (trans);
xaccBackendSetError (&be->be, ERR_BACKEND_DATA_CORRUPT);
pgendEnable(be);
gnc_engine_resume_events();
return 0;
}
/* First order of business is to determine whose data is
* newer: the engine cache, or the database. If the
* database has newer stuff, we update the engine. If the
* engine is equal or newer, we do nothing in this routine.
* Of course, we know the database has newer data if this
* transaction doesn't exist in the engine yet.
*/
if (!do_set_guid)
{
gint32 db_version, cache_version;
db_version = atoi (DB_GET_VAL("version",j));
cache_version = xaccTransGetVersion (trans);
if (db_version == cache_version) {
engine_data_is_newer = 0;
} else
if (db_version < cache_version) {
engine_data_is_newer = +1;
} else {
engine_data_is_newer = -1;
}
}
/* if the DB data is newer, copy it to engine */
if (0 > engine_data_is_newer)
{
Timespec ts;
gnc_commodity *currency;
xaccTransBeginEdit (trans);
if (do_set_guid) xaccTransSetGUID (trans, trans_guid);
xaccTransSetNum (trans, DB_GET_VAL("num",j));
xaccTransSetDescription (trans, DB_GET_VAL("description",j));
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_posted",j));
xaccTransSetDatePostedTS (trans, &ts);
ts = gnc_iso8601_to_timespec_local (DB_GET_VAL("date_entered",j));
xaccTransSetDateEnteredTS (trans, &ts);
xaccTransSetVersion (trans, atoi(DB_GET_VAL("version",j)));
currency = gnc_string_to_commodity (DB_GET_VAL("currency",j));
xaccTransSetCurrency (trans, currency);
}
}
PQclear (result);
i++;
} while (result);
/* set timestamp as 'recent' for this data */
trans->version_check = be->version_check;
/* if engine data was newer, we are done */
if (0 <= engine_data_is_newer)
{
pgendEnable(be);
gnc_engine_resume_events();
return engine_data_is_newer;
}
/* ------------------------------------------------- */
/* If we are here, then the sql database contains data that is
* newer than what we have in the engine. And so, below,
* we finish the job of yanking data out of the db.
*/
pgendCopySplitsToEngine (be, trans);
/* ------------------------------------------------- */
/* restore any kvp data associated with the transaction and splits */

View File

@@ -43,6 +43,8 @@
#include "PostgresBackend.h"
int pgendCopyTransactionToEngine (PGBackend *be, const GUID *trans_guid);
void pgendCopySplitsToEngine (PGBackend *be, Transaction *trans);
void pgendStoreAllTransactions (PGBackend *be, AccountGroup *grp);
void pgendStoreTransactionNoLock (PGBackend *be, Transaction *trans, gboolean do_check_version);