class Sequel::Postgres::Dataset
Constants
- BindArgumentMethods
- PREPARED_ARG_PLACEHOLDER
-
:nocov:
- PreparedStatementMethods
Public Instance Methods
Source
# Return the modules used to extend datasets with support for
# bound variables (PostgreSQL-specific bind arguments).
def bound_variable_modules
  [BindArgumentMethods]
end
Source
# Execute the given SELECT SQL and yield each row as a hash with
# symbol keys.  If the :cursor dataset option is set, fetch rows in
# batches via a cursor instead of loading the whole result at once.
def fetch_rows(sql)
  return cursor_fetch_rows(sql){|h| yield h} if @opts[:cursor]
  execute(sql){|res| yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}}
end
Source
# Use a cursor for paging, yielding each row to the block.
# Without a block, return an Enumerator instead.
def paged_each(opts=OPTS, &block)
  unless defined?(yield)
    return enum_for(:paged_each, opts)
  end
  use_cursor(opts).each(&block)
end
Use a cursor for paging.
Source
# PostgreSQL uses $N for placeholders instead of ?, so use a $
# as the placeholder.
def prepared_arg_placeholder
  PREPARED_ARG_PLACEHOLDER
end
PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.
Source
# Return the modules used to extend datasets with support for
# PostgreSQL-specific prepared statements.
def prepared_statement_modules
  [PreparedStatementMethods]
end
Source
# Return a clone of the dataset that fetches records using a cursor,
# instead of fetching the entire result set at once.  Supported
# options include :rows_per_fetch (default 1000), :cursor_name,
# :hold, and :skip_transaction.
def use_cursor(opts=OPTS)
  clone(:cursor=>{:rows_per_fetch=>1000}.merge!(opts))
end
Uses a cursor for fetching records, instead of fetching the entire result set at once. Note this uses a transaction around the cursor usage by default and can be changed using `hold: true` as described below. Cursors can be used to process large datasets without holding all rows in memory (which is what the underlying drivers may do by default). Options:
- :cursor_name
-
The name assigned to the cursor (default 'sequel_cursor'). Nested cursors require different names.
- :hold
-
Declare the cursor WITH HOLD and don’t use transaction around the cursor usage.
- :rows_per_fetch
-
The number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.
- :skip_transaction
-
Same as :hold, but :hold takes priority.
Usage:
DB[:huge_table].use_cursor.each{|row| p row}
DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
This is untested with the prepared statement/bound variable support, and unlikely to work with either.
Source
# Replace the WHERE clause with one that uses CURRENT OF with the
# given cursor name (or the default cursor name), allowing individual
# rows to be updated while processing the dataset via a cursor.
def where_current_of(cursor_name='sequel_cursor')
  clone(:where=>Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name)))
end
Replace the WHERE clause with one that uses CURRENT OF with the given cursor name (or the default cursor name). This allows you to update a large dataset by updating individual rows while processing the dataset via a cursor:
DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
  DB[:huge_table].where_current_of.update(column: ruby_method(row))
end
Private Instance Methods
Source
# Generate and execute a procedure call (CALL name(args...)),
# returning the first row of the result, if any.
def call_procedure(name, args)
  sql = String.new
  sql << "CALL "
  identifier_append(sql, name)
  sql << "("
  expression_list_append(sql, args)
  sql << ")"
  with_sql_first(sql)
end
Generate and execute a procedure call.
Source
# Use a cursor to fetch groups of records at a time, yielding them
# to the block.  Wraps the cursor in a transaction unless the cursor
# is declared WITH HOLD (:hold or :skip_transaction option).
def cursor_fetch_rows(sql)
  cursor = @opts[:cursor]
  # :hold takes priority over :skip_transaction when both are given.
  hold = cursor.fetch(:hold){cursor[:skip_transaction]}
  server_opts = {:server=>@opts[:server] || :read_only, :skip_transaction=>hold}
  cursor_name = quote_identifier(cursor[:cursor_name] || 'sequel_cursor')
  rows_per_fetch = cursor[:rows_per_fetch].to_i

  db.transaction(server_opts) do
    begin
      execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts)
      rows_per_fetch = 1000 if rows_per_fetch <= 0
      fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}"
      cols = nil
      # Load columns only in the first fetch, so subsequent fetches are faster
      execute(fetch_sql) do |res|
        cols = fetch_rows_set_cols(res)
        yield_hash_rows(res, cols){|h| yield h}
        # A short fetch means the cursor is exhausted.
        return if res.ntuples < rows_per_fetch
      end
      while true
        execute(fetch_sql) do |res|
          yield_hash_rows(res, cols){|h| yield h}
          return if res.ntuples < rows_per_fetch
        end
      end
    rescue Exception => e
      # Capture any exception (including non-StandardError) so that a
      # failure while closing the cursor below does not mask it.
      raise
    ensure
      begin
        execute_ddl("CLOSE #{cursor_name}", server_opts)
      rescue
        # Prefer re-raising the original exception over the CLOSE failure.
        raise e if e
        raise
      end
    end
  end
end
Use a cursor to fetch groups of records at a time, yielding them to the block.
Source
# Set the dataset's columns based on the result set, and return an
# array of [type conversion proc, column name symbol] pairs, one per
# field in the result.
def fetch_rows_set_cols(res)
  cols = []
  procs = db.conversion_procs
  res.nfields.times do |fieldnum|
    cols << [procs[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
  end
  self.columns = cols.map{|c| c[1]}
  cols
end
Set the columns based on the result set, and return the array of field numbers, type conversion procs, and name symbol arrays.
Source
# Append a literalized blob value to the SQL string, using the
# driver's escape_bytea on a checked-out connection.
def literal_blob_append(sql, v)
  sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_bytea(v)} << "'"
end
Use the driver’s escape_bytea
Source
# Append a literalized string value to the SQL string, using the
# driver's escape_string on a checked-out connection.
def literal_string_append(sql, v)
  sql << "'" << db.synchronize(@opts[:server]){|c| c.escape_string(v)} << "'"
end
Use the driver’s escape_string
Source
# For each row in the result set, yield a hash with column name
# symbol keys and typecasted values.  cols is an array of
# [conversion proc, column symbol] pairs; a nil proc (or a nil
# value, e.g. SQL NULL) skips conversion.
def yield_hash_rows(res, cols)
  ntuples = res.ntuples
  recnum = 0
  # Plain while loops to avoid block overhead in this hot path.
  while recnum < ntuples
    fieldnum = 0
    nfields = cols.length
    converted_rec = {}
    while fieldnum < nfields
      type_proc, fieldsym = cols[fieldnum]
      value = res.getvalue(recnum, fieldnum)
      converted_rec[fieldsym] = (value && type_proc) ? type_proc.call(value) : value
      fieldnum += 1
    end
    yield converted_rec
    recnum += 1
  end
end
For each row in the result set, yield a hash with column name symbol keys and typecasted values.