Some changes to adhere better to the gnucash coding guidelines

- use m_ prefix on member variables
- prefer passing by reference over copying
This commit is contained in:
Geert Janssens
2016-09-14 18:49:06 +02:00
committed by Geert Janssens
parent 737cbfb35b
commit bbeb351aa8
8 changed files with 70 additions and 69 deletions

View File

@@ -13,15 +13,15 @@
void
GncCsvTokenizer::set_separators(const std::string& separators)
{
sep_str = separators;
m_sep_str = separators;
}
int GncCsvTokenizer::tokenize()
{
typedef boost::tokenizer< boost::escaped_list_separator<char> > Tokenizer;
using Tokenizer = boost::tokenizer< boost::escaped_list_separator<char>>;
boost::escaped_list_separator<char> sep("\\", sep_str, "\"");
boost::escaped_list_separator<char> sep("\\", m_sep_str, "\"");
std::vector<std::string> vec;
std::string line;
@@ -30,8 +30,8 @@ int GncCsvTokenizer::tokenize()
bool inside_quotes(false);
size_t last_quote(0);
tokenized_contents.clear();
std::istringstream in_stream(utf8_contents);
m_tokenized_contents.clear();
std::istringstream in_stream(m_utf8_contents);
while (std::getline (in_stream, buffer))
{
@@ -64,7 +64,7 @@ int GncCsvTokenizer::tokenize()
// for correctly parsed 3 fields per record
// if (vec.size() < 3) continue;
tokenized_contents.push_back(vec);
m_tokenized_contents.push_back(vec);
}
return 0;

View File

@@ -58,7 +58,7 @@ public:
int tokenize() override;
private:
std::string sep_str = ",";
std::string m_sep_str = ",";
};
#endif

View File

@@ -15,13 +15,13 @@ int GncDummyTokenizer::tokenize()
std::vector<std::string> vec;
std::string line;
tokenized_contents.clear();
std::istringstream in_stream(utf8_contents);
m_tokenized_contents.clear();
std::istringstream in_stream(m_utf8_contents);
while (std::getline (in_stream, line))
{
vec.push_back (line);
tokenized_contents.push_back(vec);
m_tokenized_contents.push_back(vec);
line.clear();
vec.clear();

View File

@@ -14,13 +14,13 @@
void
GncFwTokenizer::columns(const std::vector<uint>& cols)
{
col_vec = cols;
m_col_vec = cols;
}
bool GncFwTokenizer::col_can_add (uint col_end)
{
if (col_end < 0 || col_end > longest_line)
if (col_end < 0 || col_end > m_longest_line)
return false;
else
return true;
@@ -30,23 +30,23 @@ void GncFwTokenizer::col_add (uint col_end)
{
if (col_can_add (col_end))
{
for (auto col_it = col_vec.begin(); col_it != col_vec.end(); col_it++)
for (auto col_it = m_col_vec.begin(); col_it != m_col_vec.end(); col_it++)
{
if (*col_it == col_end)
return; // don't add same column end twice in the column list
if (*col_it > col_end)
col_vec.insert (col_it, col_end);
m_col_vec.insert (col_it, col_end);
}
// If we got here that means the requested col_end is beyond the currently
// inserted columns, so append it
col_vec.push_back (col_end);
m_col_vec.push_back (col_end);
}
}
bool GncFwTokenizer::col_can_delete (uint col_num)
{
auto last_col = col_vec.size() - 1;
auto last_col = m_col_vec.size() - 1;
if (col_num < 0 || col_num > last_col)
return false;
else
@@ -56,19 +56,19 @@ bool GncFwTokenizer::col_can_delete (uint col_num)
void GncFwTokenizer::col_delete (uint col_num)
{
if (col_can_delete (col_num))
col_vec.erase (col_vec.begin() + col_num);
m_col_vec.erase (m_col_vec.begin() + col_num);
}
bool GncFwTokenizer::col_can_narrow (uint col_num)
{
auto last_col = col_vec.size() - 1;
auto last_col = m_col_vec.size() - 1;
int col_start, next_col_start;
if (col_num > last_col)
return false;
col_start = (col_num == 0) ? 0 : col_vec[col_num - 1];
next_col_start = col_vec[col_num];
col_start = (col_num == 0) ? 0 : m_col_vec[col_num - 1];
next_col_start = m_col_vec[col_num];
if (next_col_start - 1 <= col_start)
return false;
@@ -79,21 +79,21 @@ bool GncFwTokenizer::col_can_narrow (uint col_num)
void GncFwTokenizer::col_narrow (uint col_num)
{
if (col_can_narrow (col_num))
col_vec[col_num]--;
m_col_vec[col_num]--;
}
bool GncFwTokenizer::col_can_widen (uint col_num)
{
auto last_col = col_vec.size() - 1;
auto last_col = m_col_vec.size() - 1;
int col_end, next_col_end;
if (col_num > last_col)
return false;
col_end = col_vec[col_num];
col_end = m_col_vec[col_num];
next_col_end = (col_num == last_col - 1)
? longest_line
: col_vec[col_num + 1];
? m_longest_line
: m_col_vec[col_num + 1];
if (col_end + 1 >= next_col_end)
return false;
@@ -104,17 +104,17 @@ bool GncFwTokenizer::col_can_widen (uint col_num)
void GncFwTokenizer::col_widen (uint col_num)
{
if (col_can_widen (col_num))
col_vec[col_num]++;
m_col_vec[col_num]++;
}
bool GncFwTokenizer::col_can_split (uint col_num, uint position)
{
auto last_col = col_vec.size() - 1;
auto last_col = m_col_vec.size() - 1;
if (col_num > last_col)
return false;
uint col_start = (col_num == 0) ? 0 : col_vec[col_num - 1];
uint col_end = col_vec[col_num];
uint col_start = (col_num == 0) ? 0 : m_col_vec[col_num - 1];
uint col_end = m_col_vec[col_num];
if (position <= col_start || position >= col_end)
return false;
else
@@ -125,8 +125,8 @@ void GncFwTokenizer::col_split (uint col_num, uint position)
{
if (col_can_split (col_num, position))
{
uint col_start = (col_num == 0) ? 0 : col_vec[col_num - 1];;
col_vec.insert (col_vec.begin() + col_num, col_start + position);
uint col_start = (col_num == 0) ? 0 : m_col_vec[col_num - 1];;
m_col_vec.insert (m_col_vec.begin() + col_num, col_start + position);
}
}
@@ -134,7 +134,7 @@ void GncFwTokenizer::col_split (uint col_num, uint position)
std::string GncFwTokenizer::cols_to_string()
{
std::ostringstream colstream;
for (auto col_end : col_vec)
for (auto col_end : m_col_vec)
colstream<<col_end<<",";
std::string colstr = colstream.str();
if (!colstr.empty())
@@ -165,12 +165,12 @@ void GncFwTokenizer::load_file(const std::string& path)
GncTokenizer::load_file(path);
std::string line;
longest_line = 0;
std::istringstream in_stream(utf8_contents);
m_longest_line = 0;
std::istringstream in_stream(m_utf8_contents);
while (std::getline (in_stream, line))
{
if (line.size() > longest_line)
longest_line = line.size();
if (line.size() > m_longest_line)
m_longest_line = line.size();
line.clear();
}
@@ -181,14 +181,14 @@ int GncFwTokenizer::tokenize()
{
typedef boost::tokenizer< boost::offset_separator > Tokenizer;
boost::offset_separator sep(col_vec.begin(), col_vec.end(), false);
boost::offset_separator sep(m_col_vec.begin(), m_col_vec.end(), false);
std::vector<std::string> vec;
std::string line;
std::string buffer;
tokenized_contents.clear();
std::istringstream in_stream(utf8_contents);
m_tokenized_contents.clear();
std::istringstream in_stream(m_utf8_contents);
while (std::getline (in_stream, line))
{
@@ -208,7 +208,7 @@ int GncFwTokenizer::tokenize()
// for correctly parsed 3 fields per record
// if (vec.size() < 3) continue;
tokenized_contents.push_back(vec);
m_tokenized_contents.push_back(vec);
}
return 0;

View File

@@ -80,8 +80,8 @@ public:
private:
std::vector<uint> col_vec;
uint longest_line;
std::vector<uint> m_col_vec;
uint m_longest_line;
};
#endif

View File

@@ -42,49 +42,49 @@ GncTokenizer::load_file(const std::string& path)
if (path.empty())
return;
imp_file_str = path;
m_imp_file_str = path;
std::ifstream in;
in.exceptions ( std::ifstream::failbit | std::ifstream::badbit );
in.open (imp_file_str.c_str(), std::ios::in | std::ios::binary);
in.open (m_imp_file_str.c_str(), std::ios::in | std::ios::binary);
raw_contents.clear();
m_raw_contents.clear();
in.seekg(0, std::ios::end);
raw_contents.resize(in.tellg());
m_raw_contents.resize(in.tellg());
in.seekg(0, std::ios::beg);
in.read(&raw_contents[0], raw_contents.size());
in.read(&m_raw_contents[0], m_raw_contents.size());
in.close();
// Guess encoding, user can override if needed later on.
const char *guessed_enc = NULL;
guessed_enc = go_guess_encoding (raw_contents.c_str(),
raw_contents.length(),
enc_str.empty() ? "UTF-8" : enc_str.c_str(),
guessed_enc = go_guess_encoding (m_raw_contents.c_str(),
m_raw_contents.length(),
m_enc_str.empty() ? "UTF-8" : m_enc_str.c_str(),
NULL);
if (guessed_enc)
this->encoding(guessed_enc);
else
enc_str.clear();
m_enc_str.clear();
}
std::string
const std::string&
GncTokenizer::current_file()
{
return imp_file_str;
return m_imp_file_str;
}
void
GncTokenizer::encoding(const std::string& encoding)
{
enc_str = encoding;
utf8_contents = boost::locale::conv::to_utf<char>(raw_contents, enc_str);
m_enc_str = encoding;
m_utf8_contents = boost::locale::conv::to_utf<char>(m_raw_contents, m_enc_str);
}
std::string
const std::string&
GncTokenizer::encoding()
{
return enc_str;
return m_enc_str;
}
@@ -94,7 +94,8 @@ int GncTokenizer::tokenize()
}
std::vector<str_vec> GncTokenizer::get_tokens()
const std::vector<str_vec>&
GncTokenizer::get_tokens()
{
return tokenized_contents;
return m_tokenized_contents;
}

View File

@@ -68,20 +68,20 @@ public:
virtual ~GncTokenizer() = default; // destructor
void load_file(const std::string& path);
std::string current_file();
const std::string& current_file();
void encoding(const std::string& encoding);
std::string encoding();
const std::string& encoding();
virtual int tokenize();
std::vector<str_vec> get_tokens();
const std::vector<str_vec>& get_tokens();
protected:
std::string utf8_contents;
std::vector<str_vec> tokenized_contents;
std::string m_utf8_contents;
std::vector<str_vec> m_tokenized_contents;
private:
std::string imp_file_str;
std::string raw_contents;
std::string enc_str;
std::string m_imp_file_str;
std::string m_raw_contents;
std::string m_enc_str;
};

View File

@@ -64,9 +64,9 @@ public:
protected:
std::string& get_utf8_contents(std::unique_ptr<GncTokenizer> &tokenizer)
{ return tokenizer->utf8_contents; }
{ return tokenizer->m_utf8_contents; }
void set_utf8_contents(std::unique_ptr<GncTokenizer> &tokenizer, const std::string& newcontents)
{ tokenizer->utf8_contents = newcontents; }
{ tokenizer->m_utf8_contents = newcontents; }
void test_gnc_tokenize_helper (const std::string& separators, tokenize_csv_test_data* test_data); // for csv tokenizer
void test_gnc_tokenize_helper (tokenize_fw_test_data* test_data); // for fixed width tokenizer