op {
graph_op_name: "ResourceSparseApplyAdagradV2"
visibility: HIDDEN
in_arg {
name: "var"
description: <<END
Should be from a Variable().
END
}
in_arg {
name: "accum"
description: <<END
Should be from a Variable().
END
}
in_arg {
name: "lr"
description: <<END
Learning rate. Must be a scalar.
END
}
in_arg {
name: "epsilon"
description: <<END
Constant factor added for numerical stability. Must be a scalar.
END
}
in_arg {
name: "grad"
description: <<END
The gradient.
END
}
in_arg {
name: "indices"
description: <<END
A vector of indices into the first dimension of var and accum.
END
}
attr {
name: "use_locking"
description: <<END
If `True`, updating of the var and accum tensors will be protected
by a lock; otherwise the behavior is undefined, but may exhibit less
contention.
END
}
summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
description: <<END
That is, for the rows for which we have grad, we update var and accum as follows:
accum += grad * grad
var -= lr * grad * (1 / (sqrt(accum) + epsilon))
END
}
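# A minimal NumPy sketch (not part of the op definition) of the sparse Adagrad
# update described above. The row-by-row loop and the epsilon-in-denominator
# form are assumptions inferred from the inputs listed in this file, not a
# verbatim copy of the kernel:
#
#   import numpy as np
#
#   def sparse_apply_adagrad_v2(var, accum, lr, epsilon, grad, indices):
#       # Apply the update only to the rows of var/accum named by indices.
#       # A sequential loop handles duplicate indices the way a locked update
#       # would: each occurrence sees the previous row's accumulation.
#       for g, i in zip(grad, indices):
#           accum[i] += g * g                                 # accum += grad * grad
#           var[i] -= lr * g / (np.sqrt(accum[i]) + epsilon)  # scaled gradient step
#       return var, accum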